Software inventorycube (#7651)

Fixes missing/upside-down images on Android.
Vitaliy 2018-09-29 21:40:17 +03:00 committed by Paramat
parent 5f0c969485
commit 18a8fbf465
6 changed files with 108 additions and 307 deletions

@@ -719,10 +719,6 @@ ambient_occlusion_gamma (Ambient occlusion gamma) float 2.2 0.25 4.0
# Enables animation of inventory items.
inventory_items_animations (Inventory items animations) bool false
# Android systems only: Tries to create inventory textures from meshes
# when no supported render was found.
inventory_image_hack (Inventory image hack) bool false
# Fraction of the visible distance at which fog starts to be rendered
fog_start (Fog start) float 0.4 0.0 0.99

@@ -39,8 +39,6 @@ file can usually be found at /mnt/sdcard/Minetest.
* gui_scaling: this is a user-specified scaling factor for the GUI. In case
the main menu is too big or small on your device, try changing this
value.
* inventory_image_hack: if your inventory items are messed up, try setting
this to true
Known issues
------------

@@ -841,11 +841,6 @@
# type: bool
# inventory_items_animations = false
# Android systems only: Tries to create inventory textures from meshes
# when no supported render was found.
# type: bool
# inventory_image_hack = false
# Fraction of the visible distance at which fog starts to be rendered
# type: float min: 0 max: 0.99
# fog_start = 0.4

@@ -19,6 +19,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
#include "tile.h"
#include <algorithm>
#include <ICameraSceneNode.h>
#include "util/string.h"
#include "util/container.h"
@@ -362,12 +363,6 @@ public:
// Shall be called from the main thread.
void rebuildImagesAndTextures();
// Render a mesh to a texture.
// Returns NULL if render-to-texture failed.
// Shall be called from the main thread.
video::ITexture* generateTextureFromMesh(
const TextureFromMeshParams &params);
video::ITexture* getNormalTexture(const std::string &name);
video::SColor getTextureAverageColor(const std::string &name);
video::ITexture *getShaderFlagsTexture(bool normalmap_present);
@@ -775,205 +770,121 @@ void TextureSource::rebuildImagesAndTextures()
}
}
video::ITexture* TextureSource::generateTextureFromMesh(
const TextureFromMeshParams &params)
inline static void applyShadeFactor(video::SColor &color, u32 factor)
{
u32 f = core::clamp<u32>(factor, 0, 256);
color.setRed(color.getRed() * f / 256);
color.setGreen(color.getGreen() * f / 256);
color.setBlue(color.getBlue() * f / 256);
}
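// For illustration: applyShadeFactor(c, 256) leaves `c` unchanged, while
// applyShadeFactor(c, 128) halves the red, green and blue channels; the
// alpha channel is never touched.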
static video::IImage *createInventoryCubeImage(
video::IImage *top, video::IImage *left, video::IImage *right)
{
core::dimension2du size_top = top->getDimension();
core::dimension2du size_left = left->getDimension();
core::dimension2du size_right = right->getDimension();
u32 size = npot2(std::max({
size_top.Width, size_top.Height,
size_left.Width, size_left.Height,
size_right.Width, size_right.Height,
}));
// It must be divisible by 4, to let everything work correctly.
// But it is a power of 2, so being at least 4 is the same.
// And the resulting texture shouldn't be too large either.
size = core::clamp<u32>(size, 4, 64);
// With such parameters, the cube fits exactly, touching each image line
// from `0` to `cube_size - 1`. (Note that division is exact here).
u32 cube_size = 9 * size;
u32 offset = size / 2;
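// Worked example (illustrative): for three 16x16 face textures the largest
// dimension is 16, npot2(16) = 16, and clamping to [4, 64] keeps it at 16,
// so cube_size = 9 * 16 = 144, offset = 8, and the output image is 144x144.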
video::IVideoDriver *driver = RenderingEngine::get_video_driver();
sanity_check(driver);
#ifdef __ANDROID__
const GLubyte* renderstr = glGetString(GL_RENDERER);
std::string renderer((char*) renderstr);
// use no render to texture hack
if (
(renderer.find("Adreno") != std::string::npos) ||
(renderer.find("Mali") != std::string::npos) ||
(renderer.find("Immersion") != std::string::npos) ||
(renderer.find("Tegra") != std::string::npos) ||
g_settings->getBool("inventory_image_hack")
) {
// Get a scene manager
scene::ISceneManager *smgr_main = RenderingEngine::get_raw_device()->getSceneManager();
sanity_check(smgr_main);
scene::ISceneManager *smgr = smgr_main->createNewSceneManager();
sanity_check(smgr);
const float scaling = 0.2;
scene::IMeshSceneNode* meshnode =
smgr->addMeshSceneNode(params.mesh, NULL,
-1, v3f(0,0,0), v3f(0,0,0),
v3f(1.0 * scaling,1.0 * scaling,1.0 * scaling), true);
meshnode->setMaterialFlag(video::EMF_LIGHTING, true);
meshnode->setMaterialFlag(video::EMF_ANTI_ALIASING, true);
meshnode->setMaterialFlag(video::EMF_TRILINEAR_FILTER, m_setting_trilinear_filter);
meshnode->setMaterialFlag(video::EMF_BILINEAR_FILTER, m_setting_bilinear_filter);
meshnode->setMaterialFlag(video::EMF_ANISOTROPIC_FILTER, m_setting_anisotropic_filter);
scene::ICameraSceneNode* camera = smgr->addCameraSceneNode(0,
params.camera_position, params.camera_lookat);
// second parameter of setProjectionMatrix (isOrthogonal) is ignored
camera->setProjectionMatrix(params.camera_projection_matrix, false);
smgr->setAmbientLight(params.ambient_light);
smgr->addLightSceneNode(0,
params.light_position,
params.light_color,
params.light_radius*scaling);
core::dimension2d<u32> screen = driver->getScreenSize();
// Render scene
driver->beginScene(true, true, video::SColor(0,0,0,0));
driver->clearZBuffer();
smgr->drawAll();
core::dimension2d<u32> partsize(screen.Width * scaling,screen.Height * scaling);
irr::video::IImage* rawImage =
driver->createImage(irr::video::ECF_A8R8G8B8, partsize);
u8* pixels = static_cast<u8*>(rawImage->lock());
if (!pixels)
{
rawImage->drop();
return NULL;
auto lock_image = [size, driver] (video::IImage *&image) -> const u32 * {
image->grab();
core::dimension2du dim = image->getDimension();
video::ECOLOR_FORMAT format = image->getColorFormat();
if (dim.Width != size || dim.Height != size || format != video::ECF_A8R8G8B8) {
video::IImage *scaled = driver->createImage(video::ECF_A8R8G8B8, {size, size});
image->copyToScaling(scaled);
image->drop();
image = scaled;
}
sanity_check(image->getPitch() == 4 * size);
return reinterpret_cast<u32 *>(image->lock());
};
auto free_image = [] (video::IImage *image) -> void {
image->unlock();
image->drop();
};
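// Reference-counting note (as read from this function alone): lock_image()
// grab()s the image and, when it has to rescale, swaps in a locally owned
// A8R8G8B8 copy; free_image() then unlock()s and drop()s whichever image was
// locked, so the caller's own references to the face images stay untouched.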
core::rect<s32> source(
screen.Width /2 - (screen.Width * (scaling / 2)),
screen.Height/2 - (screen.Height * (scaling / 2)),
screen.Width /2 + (screen.Width * (scaling / 2)),
screen.Height/2 + (screen.Height * (scaling / 2))
);
video::IImage *result = driver->createImage(video::ECF_A8R8G8B8, {cube_size, cube_size});
sanity_check(result->getPitch() == 4 * cube_size);
result->fill(video::SColor(0x00000000u));
u32 *target = reinterpret_cast<u32 *>(result->lock());
glReadPixels(source.UpperLeftCorner.X, source.UpperLeftCorner.Y,
partsize.Width, partsize.Height, GL_RGBA,
GL_UNSIGNED_BYTE, pixels);
driver->endScene();
// Drop scene manager
smgr->drop();
unsigned int pixelcount = partsize.Width*partsize.Height;
u8* runptr = pixels;
for (unsigned int i=0; i < pixelcount; i++) {
u8 B = *runptr;
u8 G = *(runptr+1);
u8 R = *(runptr+2);
u8 A = *(runptr+3);
//BGRA -> RGBA
*runptr = R;
runptr ++;
*runptr = G;
runptr ++;
*runptr = B;
runptr ++;
*runptr = A;
runptr ++;
// Draws a single cube face.
// `shade_factor` is the face brightness, in the range [0.0, 1.0].
// (xu, xv, x1; yu, yv, y1) form the coordinate transformation matrix.
// `offsets` lists the pixels to be drawn for a single source pixel.
auto draw_image = [=] (video::IImage *image, float shade_factor,
s16 xu, s16 xv, s16 x1,
s16 yu, s16 yv, s16 y1,
std::initializer_list<v2s16> offsets) -> void {
u32 brightness = core::clamp<u32>(256 * shade_factor, 0, 256);
const u32 *source = lock_image(image);
for (u16 v = 0; v < size; v++) {
for (u16 u = 0; u < size; u++) {
video::SColor pixel(*source);
applyShadeFactor(pixel, brightness);
s16 x = xu * u + xv * v + x1;
s16 y = yu * u + yv * v + y1;
for (const auto &off : offsets)
target[(y + off.Y) * cube_size + (x + off.X) + offset] = pixel.color;
source++;
}
}
free_image(image);
};
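// Illustrative reading of the draw_image calls that follow: a source pixel
// (u, v) maps to x = xu*u + xv*v + x1, y = yu*u + yv*v + y1, and is then
// written to every target pixel (x + off.X + offset, y + off.Y) listed in
// `offsets`, shearing each square face into its slanted quadrilateral of
// the drawn cube.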
video::IImage* inventory_image =
driver->createImage(irr::video::ECF_A8R8G8B8, params.dim);
draw_image(top, 1.000000f,
4, -4, 4 * (size - 1),
2, 2, 0,
{
{2, 0}, {3, 0}, {4, 0}, {5, 0},
{0, 1}, {1, 1}, {2, 1}, {3, 1}, {4, 1}, {5, 1}, {6, 1}, {7, 1},
{2, 2}, {3, 2}, {4, 2}, {5, 2},
});
rawImage->copyToScaling(inventory_image);
rawImage->drop();
draw_image(left, 0.836660f,
4, 0, 0,
2, 5, 2 * size,
{
{0, 0}, {1, 0},
{0, 1}, {1, 1}, {2, 1}, {3, 1},
{0, 2}, {1, 2}, {2, 2}, {3, 2},
{0, 3}, {1, 3}, {2, 3}, {3, 3},
{0, 4}, {1, 4}, {2, 4}, {3, 4},
{2, 5}, {3, 5},
});
guiScalingCache(io::path(params.rtt_texture_name.c_str()), driver, inventory_image);
draw_image(right, 0.670820f,
4, 0, 4 * size,
-2, 5, 4 * size - 2,
{
{2, 0}, {3, 0},
{0, 1}, {1, 1}, {2, 1}, {3, 1},
{0, 2}, {1, 2}, {2, 2}, {3, 2},
{0, 3}, {1, 3}, {2, 3}, {3, 3},
{0, 4}, {1, 4}, {2, 4}, {3, 4},
{0, 5}, {1, 5},
});
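// Taken together, the three calls above draw the top face at full brightness
// and the left and right faces at roughly 84% and 67% of it, giving the
// flat-shaded isometric look of the software-rendered inventory cube.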
video::ITexture *rtt = driver->addTexture(params.rtt_texture_name.c_str(), inventory_image);
inventory_image->drop();
if (rtt == NULL) {
errorstream << "TextureSource::generateTextureFromMesh(): failed to recreate texture from image: " << params.rtt_texture_name << std::endl;
return NULL;
}
driver->makeColorKeyTexture(rtt, v2s32(0,0));
if (params.delete_texture_on_shutdown)
m_texture_trash.push_back(rtt);
return rtt;
}
#endif
if (!driver->queryFeature(video::EVDF_RENDER_TO_TARGET)) {
static bool warned = false;
if (!warned)
{
errorstream<<"TextureSource::generateTextureFromMesh(): "
<<"EVDF_RENDER_TO_TARGET not supported."<<std::endl;
warned = true;
}
return NULL;
}
// Create render target texture
video::ITexture *rtt = driver->addRenderTargetTexture(
params.dim, params.rtt_texture_name.c_str(),
video::ECF_A8R8G8B8);
if (rtt == NULL)
{
errorstream<<"TextureSource::generateTextureFromMesh(): "
<<"addRenderTargetTexture returned NULL."<<std::endl;
return NULL;
}
// Set render target
if (!driver->setRenderTarget(rtt, false, true, video::SColor(0,0,0,0))) {
driver->removeTexture(rtt);
errorstream<<"TextureSource::generateTextureFromMesh(): "
<<"failed to set render target"<<std::endl;
return NULL;
}
// Get a scene manager
scene::ISceneManager *smgr_main = RenderingEngine::get_scene_manager();
assert(smgr_main);
scene::ISceneManager *smgr = smgr_main->createNewSceneManager();
assert(smgr);
scene::IMeshSceneNode* meshnode =
smgr->addMeshSceneNode(params.mesh, NULL,
-1, v3f(0,0,0), v3f(0,0,0), v3f(1,1,1), true);
meshnode->setMaterialFlag(video::EMF_LIGHTING, true);
meshnode->setMaterialFlag(video::EMF_ANTI_ALIASING, true);
meshnode->setMaterialFlag(video::EMF_TRILINEAR_FILTER, m_setting_trilinear_filter);
meshnode->setMaterialFlag(video::EMF_BILINEAR_FILTER, m_setting_bilinear_filter);
meshnode->setMaterialFlag(video::EMF_ANISOTROPIC_FILTER, m_setting_anisotropic_filter);
scene::ICameraSceneNode* camera = smgr->addCameraSceneNode(0,
params.camera_position, params.camera_lookat);
// second parameter of setProjectionMatrix (isOrthogonal) is ignored
camera->setProjectionMatrix(params.camera_projection_matrix, false);
smgr->setAmbientLight(params.ambient_light);
smgr->addLightSceneNode(0,
params.light_position,
params.light_color,
params.light_radius);
// Render scene
driver->beginScene(true, true, video::SColor(0,0,0,0));
smgr->drawAll();
driver->endScene();
// Drop scene manager
smgr->drop();
// Unset render target
driver->setRenderTarget(0, false, true, video::SColor(0,0,0,0));
if (params.delete_texture_on_shutdown)
m_texture_trash.push_back(rtt);
return rtt;
result->unlock();
return result;
}
video::IImage* TextureSource::generateImage(const std::string &name)
@@ -1516,89 +1427,14 @@ bool TextureSource::generateImagePart(std::string part_of_name,
return true;
}
#ifdef __ANDROID__
assert(img_top->getDimension().Height == npot2(img_top->getDimension().Height));
assert(img_top->getDimension().Width == npot2(img_top->getDimension().Width));
baseimg = createInventoryCubeImage(img_top, img_left, img_right);
assert(img_left->getDimension().Height == npot2(img_left->getDimension().Height));
assert(img_left->getDimension().Width == npot2(img_left->getDimension().Width));
assert(img_right->getDimension().Height == npot2(img_right->getDimension().Height));
assert(img_right->getDimension().Width == npot2(img_right->getDimension().Width));
#endif
// Create textures from images
video::ITexture *texture_top = driver->addTexture(
(imagename_top + "__temp__").c_str(), img_top);
video::ITexture *texture_left = driver->addTexture(
(imagename_left + "__temp__").c_str(), img_left);
video::ITexture *texture_right = driver->addTexture(
(imagename_right + "__temp__").c_str(), img_right);
FATAL_ERROR_IF(!(texture_top && texture_left && texture_right), "");
// Drop images
// Face images are not needed anymore
img_top->drop();
img_left->drop();
img_right->drop();
/*
Draw a cube mesh into a render target texture
*/
scene::IMesh* cube = createCubeMesh(v3f(1, 1, 1));
setMeshColor(cube, video::SColor(255, 255, 255, 255));
cube->getMeshBuffer(0)->getMaterial().setTexture(0, texture_top);
cube->getMeshBuffer(1)->getMaterial().setTexture(0, texture_top);
cube->getMeshBuffer(2)->getMaterial().setTexture(0, texture_right);
cube->getMeshBuffer(3)->getMaterial().setTexture(0, texture_right);
cube->getMeshBuffer(4)->getMaterial().setTexture(0, texture_left);
cube->getMeshBuffer(5)->getMaterial().setTexture(0, texture_left);
TextureFromMeshParams params;
params.mesh = cube;
params.dim.set(64, 64);
params.rtt_texture_name = part_of_name + "_RTT";
// We will delete the rtt texture ourselves
params.delete_texture_on_shutdown = false;
params.camera_position.set(0, 1.0, -1.5);
params.camera_position.rotateXZBy(45);
params.camera_lookat.set(0, 0, 0);
// Set orthogonal projection
params.camera_projection_matrix.buildProjectionMatrixOrthoLH(
1.65, 1.65, 0, 100);
params.ambient_light.set(1.0, 0.2, 0.2, 0.2);
params.light_position.set(10, 100, -50);
params.light_color.set(1.0, 0.5, 0.5, 0.5);
params.light_radius = 1000;
video::ITexture *rtt = generateTextureFromMesh(params);
// Drop mesh
cube->drop();
// Free textures
driver->removeTexture(texture_top);
driver->removeTexture(texture_left);
driver->removeTexture(texture_right);
if (rtt == NULL) {
baseimg = generateImage(imagename_top);
return true;
}
// Create image of render target
video::IImage *image = driver->createImage(rtt, v2s32(0, 0), params.dim);
FATAL_ERROR_IF(!image, "Could not create image of render target");
// Cleanup texture
driver->removeTexture(rtt);
baseimg = driver->createImage(video::ECF_A8R8G8B8, params.dim);
if (image) {
image->copyTo(baseimg);
image->drop();
}
return true;
}
/*
[lowpart:percent:filename

@@ -67,25 +67,6 @@ std::string getTexturePath(const std::string &filename);
void clearTextureNameCache();
/*
ITextureSource::generateTextureFromMesh parameters
*/
namespace irr {namespace scene {class IMesh;}}
struct TextureFromMeshParams
{
scene::IMesh *mesh = nullptr;
core::dimension2d<u32> dim;
std::string rtt_texture_name;
bool delete_texture_on_shutdown;
v3f camera_position;
v3f camera_lookat;
core::CMatrix4<f32> camera_projection_matrix;
video::SColorf ambient_light;
v3f light_position;
video::SColorf light_color;
f32 light_radius;
};
/*
TextureSource creates and caches textures.
*/
@@ -123,8 +104,6 @@ public:
*/
virtual Palette* getPalette(const std::string &name) = 0;
virtual bool isKnownSourceImage(const std::string &name)=0;
virtual video::ITexture* generateTextureFromMesh(
const TextureFromMeshParams &params)=0;
virtual video::ITexture* getNormalTexture(const std::string &name)=0;
virtual video::SColor getTextureAverageColor(const std::string &name)=0;
virtual video::ITexture *getShaderFlagsTexture(bool normalmap_present)=0;
@@ -143,8 +122,6 @@ public:
virtual video::ITexture* getTexture(
const std::string &name, u32 *id = nullptr)=0;
virtual bool isKnownSourceImage(const std::string &name)=0;
virtual video::ITexture* generateTextureFromMesh(
const TextureFromMeshParams &params)=0;
virtual void processQueue()=0;
virtual void insertSourceImage(const std::string &name, video::IImage *img)=0;

@@ -427,7 +427,6 @@ void set_default_settings(Settings *settings)
settings->setDefault("chunksize", "5");
settings->setDefault("viewing_range", "50");
settings->setDefault("inventory_image_hack", "false");
// Check for a device with a small screen
float x_inches = ((double) porting::getDisplaySize().X /