Fix server steps shorter than dedicated_server_step since #13370 (#15330)

Co-authored-by: Desour <ds.desour@proton.me>
Co-authored-by: sfan5 <sfan5@live.de>
This commit is contained in:
grorp 2024-10-28 15:57:22 +01:00 committed by GitHub
parent 806fba6448
commit a450301686
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 19 additions and 16 deletions

@@ -2067,6 +2067,8 @@ ask_reconnect_on_crash (Ask to reconnect after crash) bool false
# Length of a server tick (the interval at which everything is generally updated),
# stated in seconds.
# Does not apply to sessions hosted from the client menu.
# This is a lower bound, i.e. server steps may not be shorter than this, but
# they are often longer.
dedicated_server_step (Dedicated server step) float 0.09 0.0 1.0
# Whether players are shown to clients without any range limit.

@@ -6926,10 +6926,6 @@ Timing
* `time` is a lower bound. The job is executed in the first server-step that
  started at least `time` seconds after the last time a server-step started,
  measured with globalstep dtime.
* In particular this can result in relatively large delays if `time` is close
to the server-step dtime. For example, with a target server-step of 0.09 s,
`core.after(0.09, ...)` often waits two steps, resulting in a delay of about
0.18 s.
* If `time` is `0`, the job is executed in the next step.
* `job:cancel()`

@@ -147,7 +147,8 @@ void *ServerThread::run()
		try {
			// see explanation inside
if (dtime > step_settings.steplen) // (+1 ms, because we don't sleep more fine-grained)
if (dtime > step_settings.steplen + 0.001f)
				m_server->yieldToOtherThreads(dtime);
			m_server->AsyncRunStep(step_settings.pause ? 0.0f : dtime);
@@ -1084,15 +1085,15 @@ void Server::AsyncRunStep(float dtime, bool initial_step)
	m_shutdown_state.tick(dtime, this);
}
void Server::Receive(float min_time)
{
	ZoneScoped;
	auto framemarker = FrameMarker("Server::Receive()-frame").started();

	const u64 t0 = porting::getTimeUs();
	const float min_time_us = min_time * 1e6f;
	auto remaining_time_us = [&]() -> float {
		return std::max(0.0f, min_time_us - (porting::getTimeUs() - t0));
	};

	NetworkPacket pkt;
@@ -1101,15 +1102,17 @@ void Server::Receive(float min_time)
		pkt.clear();
		peer_id = 0;
		try {
			// Round up since the target step length is the minimum step length,
			// we only have millisecond precision and we don't want to busy-wait
			// by calling ReceiveTimeoutMs(.., 0) repeatedly.
			const u32 cur_timeout_ms = std::ceil(remaining_time_us() / 1000.0f);
			if (!m_con->ReceiveTimeoutMs(&pkt, cur_timeout_ms)) {
				// No incoming data.
				if (remaining_time_us() > 0.0f)
					continue;
				else
					break;
			}
			peer_id = pkt.getPeerId();

@@ -205,7 +205,9 @@ public:
	// This is run by ServerThread and does the actual processing
	void AsyncRunStep(float dtime, bool initial_step = false);
	/// Receive and process all incoming packets. Sleep if the time goal isn't met.
	/// @param min_time minimum time to take [s]
	void Receive(float min_time);
	void yieldToOtherThreads(float dtime);
	PlayerSAO* StageTwoClientInit(session_t peer_id);