/*
Copyright (C) 2010-2011 celeron55, Perttu Ahola <celeron55@gmail.com>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "connection.h"
#include "environment.h"
#include "common_irrlicht.h"
#include "inventory.h"
#include "serialization.h" // For SER_FMT_VER_INVALID
#include "serverremoteplayer.h"
#include "inventorymanager.h"
#include <string>
#include <map>
38 typedef struct lua_State lua_State;
39 class IWritableItemDefManager;
40 class IWritableNodeDefManager;
41 class IWritableCraftDefManager;
46 v3f findSpawnPos(ServerMap &map);
49 A structure containing the data needed for queueing the fetching
52 struct QueuedBlockEmerge
55 // key = peer_id, value = flags
56 core::map<u16, u8> peer_ids;
60 This is a thread-safe class.
62 class BlockEmergeQueue
72 JMutexAutoLock lock(m_mutex);
74 core::list<QueuedBlockEmerge*>::Iterator i;
75 for(i=m_queue.begin(); i!=m_queue.end(); i++)
77 QueuedBlockEmerge *q = *i;
83 peer_id=0 adds with nobody to send to
85 void addBlock(u16 peer_id, v3s16 pos, u8 flags)
87 DSTACK(__FUNCTION_NAME);
89 JMutexAutoLock lock(m_mutex);
94 Find if block is already in queue.
95 If it is, update the peer to it and quit.
97 core::list<QueuedBlockEmerge*>::Iterator i;
98 for(i=m_queue.begin(); i!=m_queue.end(); i++)
100 QueuedBlockEmerge *q = *i;
103 q->peer_ids[peer_id] = flags;
112 QueuedBlockEmerge *q = new QueuedBlockEmerge;
115 q->peer_ids[peer_id] = flags;
116 m_queue.push_back(q);
119 // Returned pointer must be deleted
120 // Returns NULL if queue is empty
121 QueuedBlockEmerge * pop()
123 JMutexAutoLock lock(m_mutex);
125 core::list<QueuedBlockEmerge*>::Iterator i = m_queue.begin();
126 if(i == m_queue.end())
128 QueuedBlockEmerge *q = *i;
135 JMutexAutoLock lock(m_mutex);
136 return m_queue.size();
139 u32 peerItemCount(u16 peer_id)
141 JMutexAutoLock lock(m_mutex);
145 core::list<QueuedBlockEmerge*>::Iterator i;
146 for(i=m_queue.begin(); i!=m_queue.end(); i++)
148 QueuedBlockEmerge *q = *i;
149 if(q->peer_ids.find(peer_id) != NULL)
157 core::list<QueuedBlockEmerge*> m_queue;
163 class ServerThread : public SimpleThread
169 ServerThread(Server *server):
178 class EmergeThread : public SimpleThread
184 EmergeThread(Server *server):
195 if(IsRunning() == false)
205 char name[PLAYERNAME_SIZE];
211 void PrintLine(std::ostream *s);
214 u32 PIChecksum(core::list<PlayerInfo> &l);
217 Used for queueing and sorting block transfers in containers
219 Lower priority number means higher priority.
221 struct PrioritySortedBlockTransfer
223 PrioritySortedBlockTransfer(float a_priority, v3s16 a_pos, u16 a_peer_id)
225 priority = a_priority;
229 bool operator < (PrioritySortedBlockTransfer &other)
231 return priority < other.priority;
// A texture the client has asked the server to transfer.
// NOTE(review): braces and the name member were lost in extraction and
// have been reconstructed around the surviving constructor line.
struct TextureRequest
{
	// Requested texture file name
	std::string name;

	TextureRequest(const std::string &name_=""):
		name(name_)
	{
	}
};
// Server-side record of an announced texture file.
// NOTE(review): braces and the path member were lost in extraction and
// have been reconstructed around the surviving lines.
struct TextureInformation
{
	// Location of the texture file on the server's filesystem
	std::string path;
	// Digest of the file contents (per the member's name); used to
	// identify the file without resending it
	std::string sha1_digest;

	// Parameters taken by const reference; the original declared them as
	// const-by-value, which forced a needless std::string copy per call.
	TextureInformation(const std::string &path_="",
			const std::string &sha1_digest_=""):
		path(path_),
		sha1_digest(sha1_digest_)
	{
	}
};
// NOTE(review): This span appears to be the interior of the RemoteClient
// class; the class header, the constructor signature and several statement
// lines were lost in extraction (gaps in the embedded line numbering).
// Restore the missing lines from upstream before compiling. Comments below
// only annotate the surviving lines; block-comment delimiters are supplied
// where the originals were lost.
263 // peer_id=0 means this client has no associated peer
264 // NOTE: If client is made allowed to exist while peer doesn't,
265 // this has to be set to 0 when there is no peer.
266 // Also, the client must be moved to some other container.
268 // The serialization version to use with the client
269 u8 serialization_version;
// Negotiated network protocol version (0 = not yet known, per ctor below)
271 u16 net_proto_version;
272 // Version is stored in here after INIT before INIT2
273 u8 pending_serialization_version;
// Whether item/node definitions have been sent to this client
275 bool definitions_sent;
// NOTE(review): the lines below look like a constructor initializer list
// and body; the "RemoteClient():" header line itself is missing here.
278 m_time_from_building(9999),
279 m_excess_gotblocks(0)
282 serialization_version = SER_FMT_VER_INVALID;
283 net_proto_version = 0;
284 pending_serialization_version = SER_FMT_VER_INVALID;
285 definitions_sent = false;
286 m_nearest_unsent_d = 0;
287 m_nearest_unsent_reset_timer = 0.0;
288 m_nothing_to_send_counter = 0;
289 m_nothing_to_send_pause_timer = 0;
/*
296 Finds block that should be sent next to the client.
297 Environment should be locked when this is called.
298 dtime is used for resetting send radius at slow interval
*/
300 void GetNextBlocks(Server *server, float dtime,
301 core::array<PrioritySortedBlockTransfer> &dest);
// Records that the client acknowledged block p
303 void GotBlock(v3s16 p);
// Records that block p was sent and is now in flight
305 void SentBlock(v3s16 p);
// Forces one block (or a set of blocks) to be sent again
307 void SetBlockNotSent(v3s16 p);
308 void SetBlocksNotSent(core::map<v3s16, MapBlock*> &blocks);
// NOTE(review): orphaned return statement — the enclosing accessor's
// signature (a sending-count getter over m_blocks_sending) is missing.
312 return m_blocks_sending.size();
315 // Increments timeouts and removes timed-out blocks from list
316 // NOTE: This doesn't fix the server-not-sending-block bug
317 // because it is related to emerging, not sending.
318 //void RunSendingTimeouts(float dtime, float timeout);
// Dumps the send-state counters to o and resets m_excess_gotblocks.
// NOTE(review): the body braces and the trailing std::endl line are missing.
320 void PrintInfo(std::ostream &o)
322 o<<"RemoteClient "<<peer_id<<": "
323 <<"m_blocks_sent.size()="<<m_blocks_sent.size()
324 <<", m_blocks_sending.size()="<<m_blocks_sending.size()
325 <<", m_nearest_unsent_d="<<m_nearest_unsent_d
326 <<", m_excess_gotblocks="<<m_excess_gotblocks
328 m_excess_gotblocks = 0;
331 // Time from last placing or removing blocks
332 float m_time_from_building;
334 /*JMutex m_dig_mutex;
335 float m_dig_time_remaining;
338 v3s16 m_dig_position;*/
/*
341 List of active objects that the client knows of.
*/
344 core::map<u16, bool> m_known_objects;
/*
348 Blocks that have been sent to client.
349 - These don't have to be sent again.
350 - A block is cleared from here when client says it has
351 deleted it from it's memory
353 Key is position, value is dummy.
354 No MapBlock* is stored here because the blocks can get deleted.
*/
356 core::map<v3s16, bool> m_blocks_sent;
357 s16 m_nearest_unsent_d;
// Timer for periodically resetting m_nearest_unsent_d (see ctor above)
359 float m_nearest_unsent_reset_timer;
/*
362 Blocks that are currently on the line.
363 This is used for throttling the sending of blocks.
364 - The size of this list is limited to some value
365 Block is added when it is sent with BLOCKDATA.
366 Block is removed when GOTBLOCKS is received.
367 Value is time from sending. (not used at the moment)
*/
369 core::map<v3s16, float> m_blocks_sending;
/*
372 Count of excess GotBlocks().
373 There is an excess amount because the client sometimes
374 gets a block so late that the server sends it again,
375 and the client then sends two GOTBLOCKs.
376 This is resetted by PrintInfo()
*/
378 u32 m_excess_gotblocks;
380 // CPU usage optimization
381 u32 m_nothing_to_send_counter;
382 float m_nothing_to_send_pause_timer;
// NOTE(review): Large parts of this class definition were lost in
// extraction (gaps in the embedded line numbering): the opening brace,
// access specifiers, constructor/destructor signatures and many statement
// lines are missing. Restore from upstream before compiling; the comments
// below only annotate the surviving lines, and block-comment delimiters
// are supplied where the originals were lost.
385 class Server : public con::PeerHandler, public MapEventReceiver,
386 public InventoryManager, public IGameDef,
387 public IBackgroundBlockEmerger
/*
391 NOTE: Every public method should be thread-safe
*/
// NOTE(review): the two lines below look like the tail of the constructor's
// parameter list; the "Server(" line itself is missing.
395 std::string mapsavedir,
396 std::string configpath
// Binds the server port and starts processing.
399 void start(unsigned short port);
401 // This is mainly a way to pass the time to the server.
402 // Actual processing is done in an another thread.
403 void step(float dtime);
404 // This is run by ServerThread and does the actual processing
407 void ProcessData(u8 *data, u32 datasize, u16 peer_id);
409 core::list<PlayerInfo> getPlayerInfo();
// NOTE(review): commented-out helper below; its closing "}*/" line was
// lost, so a closing delimiter is supplied to keep the rest of the class
// outside the comment.
411 /*u32 getDayNightRatio()
413 return time_to_daynight_ratio(m_time_of_day.get());
*/
416 // Environment must be locked when called
417 void setTimeOfDay(u32 time)
419 m_env->setTimeOfDay(time);
420 m_time_of_day_send_timer = 0;
// Polled by the main loop; set via requestShutdown().
423 bool getShutdownRequested()
425 return m_shutdown_requested;
/*
429 Shall be called with the environment locked.
430 This is accessed by the map, which is inside the environment,
431 so it shouldn't be a problem.
*/
433 void onMapEditEvent(MapEditEvent *event);
/*
436 Shall be called with the environment and the connection locked.
*/
438 Inventory* getInventory(const InventoryLocation &loc);
439 std::string getInventoryOwner(const InventoryLocation &loc);
440 void setInventoryModified(const InventoryLocation &loc);
442 // Connection must be locked when called
443 std::wstring getStatusString();
// Asks the server loop to shut down.
445 void requestShutdown(void)
447 m_shutdown_requested = true;
451 // Envlock and conlock should be locked when calling this
452 void SendMovePlayer(Player *player);
// Looks up a player's privileges via m_authmanager; logs a warning when no
// auth entry exists. NOTE(review): the surrounding try/braces and the
// fallback return are missing here.
454 u64 getPlayerAuthPrivs(const std::string &name)
457 return m_authmanager.getPrivs(name);
459 catch(AuthNotFoundException &e)
461 dstream<<"WARNING: Auth not found for "<<name<<std::endl;
466 void setPlayerAuthPrivs(const std::string &name, u64 privs)
469 return m_authmanager.setPrivs(name, privs);
471 catch(AuthNotFoundException &e)
473 dstream<<"WARNING: Auth not found for "<<name<<std::endl;
477 // Changes a player's password, password must be given as plaintext
478 // If the player doesn't exist, a new entry is added to the auth manager
479 void setPlayerPassword(const std::string &name, const std::wstring &password);
481 // Saves g_settings to configpath given at initialization
// Ban management: thin wrappers around m_banmanager.
484 void setIpBanned(const std::string &ip, const std::string &name)
486 m_banmanager.add(ip, name);
490 void unsetIpBanned(const std::string &ip_or_name)
492 m_banmanager.remove(ip_or_name);
496 std::string getBanDescription(const std::string &ip_or_name)
498 return m_banmanager.getBanDescription(ip_or_name);
// Forwards to the connection object.
501 Address getPeerAddress(u16 peer_id)
503 return m_con.GetPeerAddress(peer_id);
506 // Envlock and conlock should be locked when calling this
507 void notifyPlayer(const char *name, const std::wstring msg);
508 void notifyPlayers(const std::wstring msg);
510 void queueBlockEmerge(v3s16 blockpos, bool allow_generate);
512 // Envlock and conlock should be locked when using Lua
513 lua_State *getLua(){ return m_lua; }
515 // IGameDef interface
517 virtual IItemDefManager* getItemDefManager();
518 virtual INodeDefManager* getNodeDefManager();
519 virtual ICraftDefManager* getCraftDefManager();
520 virtual ITextureSource* getTextureSource();
521 virtual u16 allocateUnknownNodeId(const std::string &name);
// Writable variants of the definition managers above.
523 IWritableItemDefManager* getWritableItemDefManager();
524 IWritableNodeDefManager* getWritableNodeDefManager();
525 IWritableCraftDefManager* getWritableCraftDefManager();
527 const ModSpec* getModSpec(const std::string &modname);
529 std::string getWorldPath(){ return m_mapsavedir; }
533 // con::PeerHandler implementation.
534 // These queue stuff to be processed by handlePeerChanges().
535 // As of now, these create and remove clients and players.
536 void peerAdded(con::Peer *peer);
537 void deletingPeer(con::Peer *peer, bool timeout);
// Static senders: write one message to one peer over the given connection
// without touching server state.
543 static void SendHP(con::Connection &con, u16 peer_id, u8 hp);
544 static void SendAccessDenied(con::Connection &con, u16 peer_id,
545 const std::wstring &reason);
546 static void SendDeathscreen(con::Connection &con, u16 peer_id,
547 bool set_camera_point_target, v3f camera_point_target);
548 static void SendItemDef(con::Connection &con, u16 peer_id,
549 IItemDefManager *itemdef);
550 static void SendNodeDef(con::Connection &con, u16 peer_id,
551 INodeDefManager *nodedef);
/*
554 Non-static send methods.
555 Conlock should be always used.
556 Envlock usage is documented badly but it's easy to figure out
557 which ones access the environment.
*/
560 // Envlock and conlock should be locked when calling these
561 void SendInventory(u16 peer_id);
562 // send wielded item info about player to all
563 void SendWieldedItem(const ServerRemotePlayer *srp);
564 // send wielded item info about all players to all players
565 void SendPlayerItems();
566 void SendChatMessage(u16 peer_id, const std::wstring &message);
567 void BroadcastChatMessage(const std::wstring &message);
568 void SendPlayerHP(Player *player);
/*
570 Send a node removal/addition event to all clients except ignore_id.
571 Additionally, if far_players!=NULL, players further away than
572 far_d_nodes are ignored and their peer_ids are added to far_players
*/
574 // Envlock and conlock should be locked when calling these
575 void sendRemoveNode(v3s16 p, u16 ignore_id=0,
576 core::list<u16> *far_players=NULL, float far_d_nodes=100);
577 void sendAddNode(v3s16 p, MapNode n, u16 ignore_id=0,
578 core::list<u16> *far_players=NULL, float far_d_nodes=100);
579 void setBlockNotSent(v3s16 p);
581 // Environment and Connection must be locked when called
582 void SendBlockNoLock(u16 peer_id, MapBlock *block, u8 ver);
584 // Sends blocks to clients (locks env and con on its own)
585 void SendBlocks(float dtime);
// Texture distribution to clients.
587 void PrepareTextures();
589 void SendTextureAnnouncement(u16 peer_id);
591 void SendTexturesRequested(u16 peer_id,core::list<TextureRequest> tosend);
597 void DiePlayer(Player *player);
598 void RespawnPlayer(Player *player);
600 void UpdateCrafting(u16 peer_id);
602 // When called, connection mutex should be locked
603 RemoteClient* getClient(u16 peer_id);
605 // When called, environment mutex should be locked
// NOTE(review): the body braces, the NULL check and the tail of the
// fallback return are missing/truncated below.
606 std::string getPlayerName(u16 peer_id)
608 Player *player = m_env->getPlayer(peer_id);
610 return "[id="+itos(peer_id);
611 return player->getName();
/*
615 Get a player from memory or creates one.
616 If player is already connected, return NULL
617 Does not verify/modify auth info and password.
619 Call with env and con locked.
*/
621 ServerRemotePlayer *emergePlayer(const char *name, u16 peer_id);
623 // Locks environment and connection by its own
625 void handlePeerChange(PeerChange &c);
626 void handlePeerChanges();
628 u64 getPlayerPrivs(Player *player);
// --- Member data (presumably a private: section; the access-specifier
// line is missing — TODO confirm against upstream) ---
635 float m_liquid_transform_timer;
636 float m_print_info_timer;
637 float m_objectdata_timer;
638 float m_emergethread_trigger_timer;
639 float m_savemap_timer;
640 IntervalLimiter m_map_timer_and_unload_interval;
642 // NOTE: If connection and environment are both to be locked,
643 // environment shall be locked first.
646 ServerEnvironment *m_env;
650 con::Connection m_con;
652 // Connected clients (behind the con mutex)
653 core::map<u16, RemoteClient*> m_clients;
655 // User authentication
656 AuthManager m_authmanager;
// IP/name ban list
659 BanManager m_banmanager;
662 // Envlock and conlock should be locked when using Lua
665 // Item definition manager
666 IWritableItemDefManager *m_itemdef;
668 // Node definition manager
669 IWritableNodeDefManager *m_nodedef;
671 // Craft definition manager
672 IWritableCraftDefManager *m_craftdef;
// Loaded mods
675 core::list<ModSpec> m_mods;
681 // A buffer for time steps
682 // step() increments and AsyncRunStep() run by m_thread reads it.
684 JMutex m_step_dtime_mutex;
686 // The server mainly operates in this thread
687 ServerThread m_thread;
688 // This thread fetches and generates map
689 EmergeThread m_emergethread;
690 // Queue of block coordinates to be processed by the emerge thread
691 BlockEmergeQueue m_emerge_queue;
698 //MutexedVariable<u32> m_time_of_day;
699 // Used to buffer dtime for adding to m_time_of_day
700 float m_time_counter;
701 // Timer for sending time of day over network
702 float m_time_of_day_send_timer;
703 // Uptime of server in seconds
704 MutexedVariable<double> m_uptime;
/*
708 Queues stuff from peerAdded() and deletingPeer() to
*/
722 Queue<PeerChange> m_peer_change_queue;
// World save directory; exposed via getWorldPath()
729 std::string m_mapsavedir;
731 // Configuration path ("" = no configuration file)
732 std::string m_configpath;
734 // Mod parent directory paths
735 core::list<std::string> m_modspaths;
// Set by requestShutdown(); read by getShutdownRequested()
737 bool m_shutdown_requested;
/*
740 Map edit event queue. Automatically receives all map edits.
741 The constructor of this class registers us to receive them through
744 NOTE: Should these be moved to actually be members of
*/
/*
749 Queue of map edits from the environment for sending to the clients
750 This is behind m_env_mutex
*/
752 Queue<MapEditEvent*> m_unsent_map_edit_queue;
/*
754 Set to true when the server itself is modifying the map and does
755 all sending of information by itself.
756 This is behind m_env_mutex
*/
758 bool m_ignore_map_edit_events;
/*
760 If set to !=0, the incoming MapEditEvents are modified to have
761 this peed id as the disabled recipient
762 This is behind m_env_mutex
*/
764 u16 m_ignore_map_edit_events_peer_id;
766 friend class EmergeThread;
767 friend class RemoteClient;
// Announced textures, keyed by texture name
769 std::map<std::string,TextureInformation> m_Textures;
773 Runs a simple dedicated server loop.
775 Shuts down when run is set to false.
777 void dedicated_server_loop(Server &server, bool &run);