forked from Mirrorlandia_minetest/minetest
Cache serialized mapblocks during sending
This reduces the (absolute) time spent in Server::SendBlocks() from 700ms to 300ms, and the (relative) share of MapBlock::serialize() from 80% to 60%, in a test setup with 10 players and many block changes
This commit is contained in:
parent
7fff9da71d
commit
1fa4f58080
@ -2326,22 +2326,34 @@ void Server::sendMetadataChanged(const std::list<v3s16> &meta_updates, float far
|
|||||||
}
|
}
|
||||||
|
|
||||||
void Server::SendBlockNoLock(session_t peer_id, MapBlock *block, u8 ver,
|
void Server::SendBlockNoLock(session_t peer_id, MapBlock *block, u8 ver,
|
||||||
u16 net_proto_version)
|
u16 net_proto_version, SerializedBlockCache *cache)
|
||||||
{
|
{
|
||||||
/*
|
|
||||||
Create a packet with the block in the right format
|
|
||||||
*/
|
|
||||||
thread_local const int net_compression_level = rangelim(g_settings->getS16("map_compression_level_net"), -1, 9);
|
thread_local const int net_compression_level = rangelim(g_settings->getS16("map_compression_level_net"), -1, 9);
|
||||||
|
std::string s, *sptr = nullptr;
|
||||||
|
|
||||||
|
if (cache) {
|
||||||
|
auto it = cache->find({block->getPos(), ver});
|
||||||
|
if (it != cache->end())
|
||||||
|
sptr = &it->second;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serialize the block in the right format
|
||||||
|
if (!sptr) {
|
||||||
std::ostringstream os(std::ios_base::binary);
|
std::ostringstream os(std::ios_base::binary);
|
||||||
block->serialize(os, ver, false, net_compression_level);
|
block->serialize(os, ver, false, net_compression_level);
|
||||||
block->serializeNetworkSpecific(os);
|
block->serializeNetworkSpecific(os);
|
||||||
std::string s = os.str();
|
s = os.str();
|
||||||
|
sptr = &s;
|
||||||
NetworkPacket pkt(TOCLIENT_BLOCKDATA, 2 + 2 + 2 + s.size(), peer_id);
|
}
|
||||||
|
|
||||||
|
NetworkPacket pkt(TOCLIENT_BLOCKDATA, 2 + 2 + 2 + sptr->size(), peer_id);
|
||||||
pkt << block->getPos();
|
pkt << block->getPos();
|
||||||
pkt.putRawString(s.c_str(), s.size());
|
pkt.putRawString(*sptr);
|
||||||
Send(&pkt);
|
Send(&pkt);
|
||||||
|
|
||||||
|
// Store away in cache
|
||||||
|
if (cache && sptr == &s)
|
||||||
|
(*cache)[{block->getPos(), ver}] = std::move(s);
|
||||||
}
|
}
|
||||||
|
|
||||||
void Server::SendBlocks(float dtime)
|
void Server::SendBlocks(float dtime)
|
||||||
@ -2351,7 +2363,7 @@ void Server::SendBlocks(float dtime)
|
|||||||
|
|
||||||
std::vector<PrioritySortedBlockTransfer> queue;
|
std::vector<PrioritySortedBlockTransfer> queue;
|
||||||
|
|
||||||
u32 total_sending = 0;
|
u32 total_sending = 0, unique_clients = 0;
|
||||||
|
|
||||||
{
|
{
|
||||||
ScopeProfiler sp2(g_profiler, "Server::SendBlocks(): Collect list");
|
ScopeProfiler sp2(g_profiler, "Server::SendBlocks(): Collect list");
|
||||||
@ -2366,7 +2378,9 @@ void Server::SendBlocks(float dtime)
|
|||||||
continue;
|
continue;
|
||||||
|
|
||||||
total_sending += client->getSendingCount();
|
total_sending += client->getSendingCount();
|
||||||
|
const auto old_count = queue.size();
|
||||||
client->GetNextBlocks(m_env,m_emerge, dtime, queue);
|
client->GetNextBlocks(m_env,m_emerge, dtime, queue);
|
||||||
|
unique_clients += queue.size() > old_count ? 1 : 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2385,6 +2399,12 @@ void Server::SendBlocks(float dtime)
|
|||||||
ScopeProfiler sp(g_profiler, "Server::SendBlocks(): Send to clients");
|
ScopeProfiler sp(g_profiler, "Server::SendBlocks(): Send to clients");
|
||||||
Map &map = m_env->getMap();
|
Map &map = m_env->getMap();
|
||||||
|
|
||||||
|
SerializedBlockCache cache, *cache_ptr = nullptr;
|
||||||
|
if (unique_clients > 1) {
|
||||||
|
// caching is pointless with a single client
|
||||||
|
cache_ptr = &cache;
|
||||||
|
}
|
||||||
|
|
||||||
for (const PrioritySortedBlockTransfer &block_to_send : queue) {
|
for (const PrioritySortedBlockTransfer &block_to_send : queue) {
|
||||||
if (total_sending >= max_blocks_to_send)
|
if (total_sending >= max_blocks_to_send)
|
||||||
break;
|
break;
|
||||||
@ -2399,7 +2419,7 @@ void Server::SendBlocks(float dtime)
|
|||||||
continue;
|
continue;
|
||||||
|
|
||||||
SendBlockNoLock(block_to_send.peer_id, block, client->serialization_version,
|
SendBlockNoLock(block_to_send.peer_id, block, client->serialization_version,
|
||||||
client->net_proto_version);
|
client->net_proto_version, cache_ptr);
|
||||||
|
|
||||||
client->SentBlock(block_to_send.pos);
|
client->SentBlock(block_to_send.pos);
|
||||||
total_sending++;
|
total_sending++;
|
||||||
|
14
src/server.h
14
src/server.h
@ -424,6 +424,16 @@ private:
|
|||||||
std::unordered_set<session_t> waiting_players;
|
std::unordered_set<session_t> waiting_players;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// the standard library does not implement std::hash for pairs so we have this:
|
||||||
|
struct SBCHash {
|
||||||
|
size_t operator() (const std::pair<v3s16, u16> &p) const {
|
||||||
|
return (((size_t) p.first.X) << 48) | (((size_t) p.first.Y) << 32) |
|
||||||
|
(((size_t) p.first.Z) << 16) | ((size_t) p.second);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
typedef std::unordered_map<std::pair<v3s16, u16>, std::string, SBCHash> SerializedBlockCache;
|
||||||
|
|
||||||
void init();
|
void init();
|
||||||
|
|
||||||
void SendMovement(session_t peer_id);
|
void SendMovement(session_t peer_id);
|
||||||
@ -484,7 +494,9 @@ private:
|
|||||||
float far_d_nodes = 100);
|
float far_d_nodes = 100);
|
||||||
|
|
||||||
// Environment and Connection must be locked when called
|
// Environment and Connection must be locked when called
|
||||||
void SendBlockNoLock(session_t peer_id, MapBlock *block, u8 ver, u16 net_proto_version);
|
// `cache` may only be very short lived! (invalidation not handled)
|
||||||
|
void SendBlockNoLock(session_t peer_id, MapBlock *block, u8 ver,
|
||||||
|
u16 net_proto_version, SerializedBlockCache *cache = nullptr);
|
||||||
|
|
||||||
// Sends blocks to clients (locks env and con on its own)
|
// Sends blocks to clients (locks env and con on its own)
|
||||||
void SendBlocks(float dtime);
|
void SendBlocks(float dtime);
|
||||||
|
Loading…
Reference in New Issue
Block a user