From a450301686d31f829b44c571e5e731bd4e18b755 Mon Sep 17 00:00:00 2001
From: grorp
Date: Mon, 28 Oct 2024 15:57:22 +0100
Subject: [PATCH] Fix server steps shorter than dedicated_server_step since
 #13370 (#15330)

Co-authored-by: Desour
Co-authored-by: sfan5
---
 builtin/settingtypes.txt |  2 ++
 doc/lua_api.md           |  4 ----
 src/server.cpp           | 25 ++++++++++++++-----------
 src/server.h             |  4 +++-
 4 files changed, 19 insertions(+), 16 deletions(-)

diff --git a/builtin/settingtypes.txt b/builtin/settingtypes.txt
index 08902521e..3e23cd9c5 100644
--- a/builtin/settingtypes.txt
+++ b/builtin/settingtypes.txt
@@ -2067,6 +2067,8 @@ ask_reconnect_on_crash (Ask to reconnect after crash) bool false
 # Length of a server tick (the interval at which everything is generally updated),
 # stated in seconds.
 # Does not apply to sessions hosted from the client menu.
+# This is a lower bound, i.e. server steps may not be shorter than this, but
+# they are often longer.
 dedicated_server_step (Dedicated server step) float 0.09 0.0 1.0
 
 # Whether players are shown to clients without any range limit.
diff --git a/doc/lua_api.md b/doc/lua_api.md
index e93e18c2a..6db358c85 100644
--- a/doc/lua_api.md
+++ b/doc/lua_api.md
@@ -6926,10 +6926,6 @@ Timing
     * `time` is a lower bound. The job is executed in the first server-step that
       started at least `time` seconds after the last time a server-step started,
       measured with globalstep dtime.
-    * In particular this can result in relatively large delays if `time` is close
-      to the server-step dtime. For example, with a target server-step of 0.09 s,
-      `core.after(0.09, ...)` often waits two steps, resulting in a delay of about
-      0.18 s.
     * If `time` is `0`, the job is executed in the next step.
 
 * `job:cancel()`
diff --git a/src/server.cpp b/src/server.cpp
index 6b1dfffac..531eaf664 100644
--- a/src/server.cpp
+++ b/src/server.cpp
@@ -147,7 +147,8 @@ void *ServerThread::run()
 
 	try {
 		// see explanation inside
-		if (dtime > step_settings.steplen)
+		// (+1 ms, because we don't sleep more fine-grained)
+		if (dtime > step_settings.steplen + 0.001f)
 			m_server->yieldToOtherThreads(dtime);
 
 		m_server->AsyncRunStep(step_settings.pause ? 0.0f : dtime);
@@ -1084,15 +1085,15 @@ void Server::AsyncRunStep(float dtime, bool initial_step)
 	m_shutdown_state.tick(dtime, this);
 }
 
-void Server::Receive(float timeout)
+void Server::Receive(float min_time)
 {
 	ZoneScoped;
 	auto framemarker = FrameMarker("Server::Receive()-frame").started();
 
 	const u64 t0 = porting::getTimeUs();
-	const float timeout_us = timeout * 1e6f;
+	const float min_time_us = min_time * 1e6f;
 	auto remaining_time_us = [&]() -> float {
-		return std::max(0.0f, timeout_us - (porting::getTimeUs() - t0));
+		return std::max(0.0f, min_time_us - (porting::getTimeUs() - t0));
 	};
 
 	NetworkPacket pkt;
@@ -1101,15 +1102,17 @@ void Server::Receive(float timeout)
 		pkt.clear();
 		peer_id = 0;
 		try {
-			if (!m_con->ReceiveTimeoutMs(&pkt,
-					(u32)remaining_time_us() / 1000)) {
+			// Round up since the target step length is the minimum step length,
+			// we only have millisecond precision and we don't want to busy-wait
+			// by calling ReceiveTimeoutMs(.., 0) repeatedly.
+			const u32 cur_timeout_ms = std::ceil(remaining_time_us() / 1000.0f);
+
+			if (!m_con->ReceiveTimeoutMs(&pkt, cur_timeout_ms)) {
 				// No incoming data.
-				// Already break if there's 1ms left, as ReceiveTimeoutMs is too coarse
-				// and a faster server-step is better than busy waiting.
-				if (remaining_time_us() < 1000.0f)
-					break;
-				else
+				if (remaining_time_us() > 0.0f)
 					continue;
+				else
+					break;
 			}
 
 			peer_id = pkt.getPeerId();
diff --git a/src/server.h b/src/server.h
index f2a9083b6..69dace6d5 100644
--- a/src/server.h
+++ b/src/server.h
@@ -205,7 +205,9 @@ public:
 	// This is run by ServerThread and does the actual processing
 	void AsyncRunStep(float dtime, bool initial_step = false);
 
-	void Receive(float timeout);
+	/// Receive and process all incoming packets. Sleep if the time goal isn't met.
+	/// @param min_time minimum time to take [s]
+	void Receive(float min_time);
 	void yieldToOtherThreads(float dtime);
 
 	PlayerSAO* StageTwoClientInit(session_t peer_id);
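Illustrative note (not part of the patch): the sketch below is a self-contained approximation of the receive loop that the new Server::Receive(float min_time) follows. The names try_receive_ms(), receive_for() and min_step are made up for this example, and try_receive_ms() is only a stub standing in for con::Connection::ReceiveTimeoutMs(). It shows why the millisecond timeout is rounded up (a 0 ms timeout would turn the wait into a busy loop) and why the loop only exits once at least min_time has elapsed, which is what makes dedicated_server_step a lower bound on the step length.

// Standalone sketch only; this is not engine code.
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <thread>

// Stand-in for the network layer: wait up to timeout_ms for incoming data and
// return true if a packet arrived. Here it just sleeps and reports "no data".
static bool try_receive_ms(uint32_t timeout_ms)
{
	std::this_thread::sleep_for(std::chrono::milliseconds(timeout_ms));
	return false;
}

// Receive (and normally process) packets for at least min_time seconds.
static void receive_for(float min_time)
{
	using clock = std::chrono::steady_clock;
	const auto t0 = clock::now();

	auto remaining_us = [&]() -> float {
		const float elapsed_us = std::chrono::duration_cast<std::chrono::microseconds>(
				clock::now() - t0).count();
		return std::max(0.0f, min_time * 1e6f - elapsed_us);
	};

	for (;;) {
		// Round up: only millisecond precision is available, and a timeout of 0
		// would make the remainder of the wait a busy loop.
		const uint32_t timeout_ms = (uint32_t)std::ceil(remaining_us() / 1000.0f);

		if (!try_receive_ms(timeout_ms)) {
			// No incoming data: keep waiting until the minimum time is used up.
			if (remaining_us() > 0.0f)
				continue;
			else
				break;
		}
		// A real server would dispatch the received packet here and loop again.
	}
}

int main()
{
	const float min_step = 0.09f; // same value as the default dedicated_server_step

	const auto start = std::chrono::steady_clock::now();
	receive_for(min_step);
	const std::chrono::duration<float> took = std::chrono::steady_clock::now() - start;

	// Prints a duration that is never shorter than min_step.
	std::cout << "step took " << took.count() << " s\n";
}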