Projects : bitcoin : bitcoin_getblockindex_etc
1 | // Copyright (c) 2009-2010 Satoshi Nakamoto |
2 | // Copyright (c) 2009-2012 The Bitcoin developers |
3 | // Distributed under the MIT/X11 software license, see the accompanying |
4 | // file license.txt or http://www.opensource.org/licenses/mit-license.php. |
5 | |
6 | #include "headers.h" |
7 | #include "db.h" |
8 | #include "net.h" |
9 | #include "init.h" |
10 | #include "strlcpy.h" |
11 | |
12 | |
13 | using namespace std; |
14 | using namespace boost; |
15 | |
// Cap on connections we initiate ourselves; inbound capacity is limited
// separately against -maxconnections in the socket handler thread.
static const int MAX_OUTBOUND_CONNECTIONS = 8;

// Worker bodies for the thread wrappers defined later in this file.
void ThreadMessageHandler2(void* parg);
void ThreadSocketHandler2(void* parg);
void ThreadOpenConnections2(void* parg);
bool OpenNetworkConnection(const CAddress& addrConnect);




//
// Global state variables
//
bool fClient = false;
// Service bits we advertise; a client (no full chain) advertises none.
uint64 nLocalServices = (fClient ? 0 : NODE_NETWORK);
CAddress addrLocalHost("0.0.0.0", 0, nLocalServices);
// Placeholder node representing ourselves; NULL until the network starts.
static CNode* pnodeLocalHost = NULL;
uint64 nLocalHostNonce = 0;
// Per-thread liveness counters; slot 0 = socket handler, 1 = open
// connections, 2 = message handler (see the wrappers below).
array<int, 10> vnThreadsRunning;
static SOCKET hListenSocket = INVALID_SOCKET;

// Connected peers; guarded by cs_vNodes.
vector<CNode*> vNodes;
CCriticalSection cs_vNodes;
// Known peer addresses keyed by CAddress::GetKey(); guarded by cs_mapAddresses.
map<vector<unsigned char>, CAddress> mapAddresses;
CCriticalSection cs_mapAddresses;
// Inventory items we are willing to relay, with their expiration queue;
// both guarded by cs_mapRelay.
map<CInv, CDataStream> mapRelay;
deque<pair<int64, CInv> > vRelayExpiration;
CCriticalSection cs_mapRelay;
map<CInv, int64> mapAlreadyAskedFor;

// Settings
int fUseProxy = false;          // int (not bool) so it round-trips through settings code
int nConnectTimeout = 5000;     // connect() timeout in milliseconds
CAddress addrProxy("127.0.0.1",9050);  // default is the local Tor SOCKS port
51 | |
52 | |
53 | |
54 | |
55 | unsigned short GetListenPort() |
56 | { |
57 | return (unsigned short)(GetArg("-port", GetDefaultPort())); |
58 | } |
59 | |
// Send a "getblocks" request to this peer: a block locator built from
// pindexBegin, plus the hash at which the peer should stop (hashEnd).
void CNode::PushGetBlocks(CBlockIndex* pindexBegin, uint256 hashEnd)
{
    PushMessage("getblocks", CBlockLocator(pindexBegin), hashEnd);
}
64 | |
65 | |
66 | |
67 | |
68 | |
69 | bool ConnectSocket(const CAddress& addrConnect, SOCKET& hSocketRet, int nTimeout) |
70 | { |
71 | hSocketRet = INVALID_SOCKET; |
72 | |
73 | SOCKET hSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); |
74 | if (hSocket == INVALID_SOCKET) |
75 | return false; |
76 | #ifdef SO_NOSIGPIPE |
77 | int set = 1; |
78 | setsockopt(hSocket, SOL_SOCKET, SO_NOSIGPIPE, (void*)&set, sizeof(int)); |
79 | #endif |
80 | |
81 | bool fProxy = (fUseProxy && addrConnect.IsRoutable()); |
82 | struct sockaddr_in sockaddr = (fProxy ? addrProxy.GetSockAddr() : addrConnect.GetSockAddr()); |
83 | |
84 | int fFlags = fcntl(hSocket, F_GETFL, 0); |
85 | if (fcntl(hSocket, F_SETFL, fFlags | O_NONBLOCK) == -1) |
86 | |
87 | { |
88 | closesocket(hSocket); |
89 | return false; |
90 | } |
91 | |
92 | |
93 | if (connect(hSocket, (struct sockaddr*)&sockaddr, sizeof(sockaddr)) == SOCKET_ERROR) |
94 | { |
95 | // WSAEINVAL is here because some legacy version of winsock uses it |
96 | if (WSAGetLastError() == WSAEINPROGRESS || WSAGetLastError() == WSAEWOULDBLOCK || WSAGetLastError() == WSAEINVAL) |
97 | { |
98 | struct timeval timeout; |
99 | timeout.tv_sec = nTimeout / 1000; |
100 | timeout.tv_usec = (nTimeout % 1000) * 1000; |
101 | |
102 | fd_set fdset; |
103 | FD_ZERO(&fdset); |
104 | FD_SET(hSocket, &fdset); |
105 | int nRet = select(hSocket + 1, NULL, &fdset, NULL, &timeout); |
106 | if (nRet == 0) |
107 | { |
108 | printf("connection timeout\n"); |
109 | closesocket(hSocket); |
110 | return false; |
111 | } |
112 | if (nRet == SOCKET_ERROR) |
113 | { |
114 | printf("select() for connection failed: %i\n",WSAGetLastError()); |
115 | closesocket(hSocket); |
116 | return false; |
117 | } |
118 | socklen_t nRetSize = sizeof(nRet); |
119 | if (getsockopt(hSocket, SOL_SOCKET, SO_ERROR, &nRet, &nRetSize) == SOCKET_ERROR) |
120 | { |
121 | printf("getsockopt() for connection failed: %i\n",WSAGetLastError()); |
122 | closesocket(hSocket); |
123 | return false; |
124 | } |
125 | if (nRet != 0) |
126 | { |
127 | printf("connect() failed after select(): %s\n",strerror(nRet)); |
128 | closesocket(hSocket); |
129 | return false; |
130 | } |
131 | } |
132 | else |
133 | { |
134 | printf("connect() failed: %i\n",WSAGetLastError()); |
135 | closesocket(hSocket); |
136 | return false; |
137 | } |
138 | } |
139 | |
140 | /* |
141 | this isn't even strictly necessary |
142 | CNode::ConnectNode immediately turns the socket back to non-blocking |
143 | but we'll turn it back to blocking just in case |
144 | */ |
145 | fFlags = fcntl(hSocket, F_GETFL, 0); |
146 | if (fcntl(hSocket, F_SETFL, fFlags & !O_NONBLOCK) == SOCKET_ERROR) |
147 | { |
148 | closesocket(hSocket); |
149 | return false; |
150 | } |
151 | |
152 | if (fProxy) |
153 | { |
154 | printf("proxy connecting %s\n", addrConnect.ToString().c_str()); |
155 | char pszSocks4IP[] = "\4\1\0\0\0\0\0\0user"; |
156 | memcpy(pszSocks4IP + 2, &addrConnect.port, 2); |
157 | memcpy(pszSocks4IP + 4, &addrConnect.ip, 4); |
158 | char* pszSocks4 = pszSocks4IP; |
159 | int nSize = sizeof(pszSocks4IP); |
160 | |
161 | int ret = send(hSocket, pszSocks4, nSize, MSG_NOSIGNAL); |
162 | if (ret != nSize) |
163 | { |
164 | closesocket(hSocket); |
165 | return error("Error sending to proxy"); |
166 | } |
167 | char pchRet[8]; |
168 | if (recv(hSocket, pchRet, 8, 0) != 8) |
169 | { |
170 | closesocket(hSocket); |
171 | return error("Error reading proxy response"); |
172 | } |
173 | if (pchRet[1] != 0x5a) |
174 | { |
175 | closesocket(hSocket); |
176 | if (pchRet[1] != 0x5b) |
177 | printf("ERROR: Proxy returned error %d\n", pchRet[1]); |
178 | return false; |
179 | } |
180 | printf("proxy connected %s\n", addrConnect.ToString().c_str()); |
181 | } |
182 | |
183 | hSocketRet = hSocket; |
184 | return true; |
185 | } |
186 | |
187 | // portDefault is in host order |
// Parse pszName into zero or more CAddress results in vaddr.
// portDefault is in host order.  When fAllowPort is set, a trailing
// ":port" (or "[host]:port") overrides portDefault.  Only numeric IP
// literals are accepted (inet_addr); names needing DNS resolution fail.
// Returns true if at least one address was produced.
bool Lookup(const char *pszName, vector<CAddress>& vaddr, int nServices, int nMaxSolutions, int portDefault, bool fAllowPort)
{
    vaddr.clear();
    if (pszName[0] == 0)
        return false;
    int port = portDefault;
    // Work on a local copy so the port separator can be cut out in place.
    char psz[256];
    char *pszHost = psz;
    strlcpy(psz, pszName, sizeof(psz));
    if (fAllowPort)
    {
        // psz+1 so a leading ':' is never treated as a port separator.
        char* pszColon = strrchr(psz+1,':');
        char *pszPortEnd = NULL;
        int portParsed = pszColon ? strtoul(pszColon+1, &pszPortEnd, 10) : 0;
        // Only accept the suffix as a port if it parsed to the very end.
        if (pszColon && pszPortEnd && pszPortEnd[0] == 0)
        {
            if (psz[0] == '[' && pszColon[-1] == ']')
            {
                // Future: enable IPv6 colon-notation inside []
                pszHost = psz+1;
                pszColon[-1] = 0;
            }
            else
                pszColon[0] = 0;
            port = portParsed;
            // Clamp out-of-range ports rather than failing the lookup.
            if (port < 0 || port > USHRT_MAX)
                port = USHRT_MAX;
        }
    }

    unsigned int addrIP = inet_addr(pszHost);
    if (addrIP != INADDR_NONE)
    {
        // valid IP address passed
        vaddr.push_back(CAddress(addrIP, port, nServices));
        return true;
    }

    return false;
}
228 | |
229 | // portDefault is in host order |
230 | bool Lookup(const char *pszName, CAddress& addr, int nServices, int portDefault, bool fAllowPort) |
231 | { |
232 | vector<CAddress> vaddr; |
233 | bool fRet = Lookup(pszName, vaddr, nServices, 1, portDefault, fAllowPort); |
234 | if (fRet) |
235 | addr = vaddr[0]; |
236 | return fRet; |
237 | } |
238 | |
239 | |
// Insert or refresh addr in mapAddresses (and persist any change to the
// address database).  nTimePenalty ages the advertised timestamp before
// storing it.  Returns true only if the address was previously unknown.
bool AddAddress(CAddress addr, int64 nTimePenalty, CAddrDB *pAddrDB)
{
    if (!addr.IsRoutable())
        return false;
    if (addr.ip == addrLocalHost.ip)
        return false;
    // Apply the penalty, clamping so nTime never goes negative.
    addr.nTime = max((int64)0, (int64)addr.nTime - nTimePenalty);
    bool fUpdated = false;
    bool fNew = false;
    CAddress addrFound = addr;

    CRITICAL_BLOCK(cs_mapAddresses)
    {
        map<vector<unsigned char>, CAddress>::iterator it = mapAddresses.find(addr.GetKey());
        if (it == mapAddresses.end())
        {
            // New address
            printf("AddAddress(%s)\n", addr.ToString().c_str());
            mapAddresses.insert(make_pair(addr.GetKey(), addr));
            fUpdated = true;
            fNew = true;
        }
        else
        {
            addrFound = (*it).second;
            if ((addrFound.nServices | addr.nServices) != addrFound.nServices)
            {
                // Services have been added
                addrFound.nServices |= addr.nServices;
                fUpdated = true;
            }
            // Refresh the last-seen time, but no more than once per hour
            // for online peers (once per day otherwise) to limit db writes.
            bool fCurrentlyOnline = (GetAdjustedTime() - addr.nTime < 24 * 60 * 60);
            int64 nUpdateInterval = (fCurrentlyOnline ? 60 * 60 : 24 * 60 * 60);
            if (addrFound.nTime < addr.nTime - nUpdateInterval)
            {
                // Periodically update most recently seen time
                addrFound.nTime = addr.nTime;
                fUpdated = true;
            }
        }
    }
    // There is a nasty deadlock bug if this is done inside the cs_mapAddresses
    // CRITICAL_BLOCK:
    // Thread 1: begin db transaction (locks inside-db-mutex)
    //           then AddAddress (locks cs_mapAddresses)
    // Thread 2: AddAddress (locks cs_mapAddresses)
    //           ... then db operation hangs waiting for inside-db-mutex
    if (fUpdated)
    {
        if (pAddrDB)
            pAddrDB->WriteAddress(addrFound);
        else
            CAddrDB().WriteAddress(addrFound);
    }
    return fNew;
}
296 | |
297 | void AddressCurrentlyConnected(const CAddress& addr) |
298 | { |
299 | CAddress *paddrFound = NULL; |
300 | |
301 | CRITICAL_BLOCK(cs_mapAddresses) |
302 | { |
303 | // Only if it's been published already |
304 | map<vector<unsigned char>, CAddress>::iterator it = mapAddresses.find(addr.GetKey()); |
305 | if (it != mapAddresses.end()) |
306 | paddrFound = &(*it).second; |
307 | } |
308 | |
309 | if (paddrFound) |
310 | { |
311 | int64 nUpdateInterval = 20 * 60; |
312 | if (paddrFound->nTime < GetAdjustedTime() - nUpdateInterval) |
313 | { |
314 | // Periodically update most recently seen time |
315 | paddrFound->nTime = GetAdjustedTime(); |
316 | CAddrDB addrdb; |
317 | addrdb.WriteAddress(*paddrFound); |
318 | } |
319 | } |
320 | } |
321 | |
322 | |
323 | |
324 | |
325 | |
// Remove every pending reply callback matching (fn, param1) from every
// connected node's request map.
void AbandonRequests(void (*fn)(void*, CDataStream&), void* param1)
{
    // If the dialog might get closed before the reply comes back,
    // call this in the destructor so it doesn't get called after it's deleted.
    CRITICAL_BLOCK(cs_vNodes)
    {
        BOOST_FOREACH(CNode* pnode, vNodes)
        {
            CRITICAL_BLOCK(pnode->cs_mapRequests)
            {
                for (map<uint256, CRequestTracker>::iterator mi = pnode->mapRequests.begin(); mi != pnode->mapRequests.end();)
                {
                    CRequestTracker& tracker = (*mi).second;
                    if (tracker.fn == fn && tracker.param1 == param1)
                        pnode->mapRequests.erase(mi++);  // post-increment keeps mi valid across erase
                    else
                        mi++;
                }
            }
        }
    }
}
348 | |
349 | |
350 | |
351 | |
352 | |
353 | |
354 | |
355 | // |
356 | // Subscription methods for the broadcast and subscription system. |
357 | // Channel numbers are message numbers, i.e. MSG_TABLE and MSG_PRODUCT. |
358 | // |
359 | // The subscription system uses a meet-in-the-middle strategy. |
360 | // With 100,000 nodes, if senders broadcast to 1000 random nodes and receivers |
361 | // subscribe to 1000 random nodes, 99.995% (1 - 0.99^1000) of messages will get through. |
362 | // |
363 | |
364 | bool AnySubscribed(unsigned int nChannel) |
365 | { |
366 | if (pnodeLocalHost->IsSubscribed(nChannel)) |
367 | return true; |
368 | CRITICAL_BLOCK(cs_vNodes) |
369 | BOOST_FOREACH(CNode* pnode, vNodes) |
370 | if (pnode->IsSubscribed(nChannel)) |
371 | return true; |
372 | return false; |
373 | } |
374 | |
375 | bool CNode::IsSubscribed(unsigned int nChannel) |
376 | { |
377 | if (nChannel >= vfSubscribe.size()) |
378 | return false; |
379 | return vfSubscribe[nChannel]; |
380 | } |
381 | |
// Mark this node as subscribed to nChannel.  If nobody (including
// ourselves) was subscribed yet, relay the subscribe to all other peers
// first -- the relay check must happen BEFORE the flag is set.
void CNode::Subscribe(unsigned int nChannel, unsigned int nHops)
{
    if (nChannel >= vfSubscribe.size())
        return;

    if (!AnySubscribed(nChannel))
    {
        // Relay subscribe
        CRITICAL_BLOCK(cs_vNodes)
            BOOST_FOREACH(CNode* pnode, vNodes)
                if (pnode != this)
                    pnode->PushMessage("subscribe", nChannel, nHops);
    }

    vfSubscribe[nChannel] = true;
}
398 | |
// Clear this node's subscription to nChannel; if that leaves nobody
// subscribed, relay the cancellation to all other peers.  The flag must
// be cleared BEFORE the AnySubscribed check so we don't count ourselves.
void CNode::CancelSubscribe(unsigned int nChannel)
{
    if (nChannel >= vfSubscribe.size())
        return;

    // Prevent from relaying cancel if wasn't subscribed
    if (!vfSubscribe[nChannel])
        return;
    vfSubscribe[nChannel] = false;

    if (!AnySubscribed(nChannel))
    {
        // Relay subscription cancel
        CRITICAL_BLOCK(cs_vNodes)
            BOOST_FOREACH(CNode* pnode, vNodes)
                if (pnode != this)
                    pnode->PushMessage("sub-cancel", nChannel);
    }
}
418 | |
419 | |
420 | |
421 | |
422 | |
423 | |
424 | |
425 | |
426 | |
427 | CNode* FindNode(unsigned int ip) |
428 | { |
429 | CRITICAL_BLOCK(cs_vNodes) |
430 | { |
431 | BOOST_FOREACH(CNode* pnode, vNodes) |
432 | if (pnode->addr.ip == ip) |
433 | return (pnode); |
434 | } |
435 | return NULL; |
436 | } |
437 | |
438 | CNode* FindNode(CAddress addr) |
439 | { |
440 | CRITICAL_BLOCK(cs_vNodes) |
441 | { |
442 | BOOST_FOREACH(CNode* pnode, vNodes) |
443 | if (pnode->addr == addr) |
444 | return (pnode); |
445 | } |
446 | return NULL; |
447 | } |
448 | |
// Return a referenced CNode for addrConnect, reusing an existing
// connection if one exists, otherwise dialing a new one.  The returned
// node carries an extra reference (time-bounded when nTimeout != 0)
// that the caller must Release().  Returns NULL on failure or when the
// address is our own.
CNode* ConnectNode(CAddress addrConnect, int64 nTimeout)
{
    if (addrConnect.ip == addrLocalHost.ip)
        return NULL;

    // Look for an existing connection
    CNode* pnode = FindNode(addrConnect.ip);
    if (pnode)
    {
        if (nTimeout != 0)
            pnode->AddRef(nTimeout);
        else
            pnode->AddRef();
        return pnode;
    }

    /// debug print
    printf("trying connection %s lastseen=%.1fhrs lasttry=%.1fhrs\n",
        addrConnect.ToString().c_str(),
        (double)(addrConnect.nTime - GetAdjustedTime())/3600.0,
        (double)(addrConnect.nLastTry - GetAdjustedTime())/3600.0);

    // Record the attempt time even if the connect fails, so the
    // retry-frequency logic in ThreadOpenConnections2 backs off.
    CRITICAL_BLOCK(cs_mapAddresses)
        mapAddresses[addrConnect.GetKey()].nLastTry = GetAdjustedTime();

    // Connect
    SOCKET hSocket;
    if (ConnectSocket(addrConnect, hSocket))
    {
        /// debug print
        printf("connected %s\n", addrConnect.ToString().c_str());

        // Set to nonblocking
        if (fcntl(hSocket, F_SETFL, O_NONBLOCK) == SOCKET_ERROR)
            printf("ConnectSocket() : fcntl nonblocking setting failed, error %d\n", errno);

        // Add node (inner pnode deliberately shadows the outer NULL one)
        CNode* pnode = new CNode(hSocket, addrConnect, false);
        if (nTimeout != 0)
            pnode->AddRef(nTimeout);
        else
            pnode->AddRef();
        CRITICAL_BLOCK(cs_vNodes)
            vNodes.push_back(pnode);

        pnode->nTimeConnected = GetTime();
        return pnode;
    }
    else
    {
        return NULL;
    }
}
502 | |
503 | void CNode::CloseSocketDisconnect() |
504 | { |
505 | fDisconnect = true; |
506 | if (hSocket != INVALID_SOCKET) |
507 | { |
508 | if (fDebug) |
509 | printf("%s ", DateTimeStrFormat("%x %H:%M:%S", GetTime()).c_str()); |
510 | printf("disconnecting node %s\n", addr.ToString().c_str()); |
511 | closesocket(hSocket); |
512 | hSocket = INVALID_SOCKET; |
513 | } |
514 | } |
515 | |
516 | void CNode::Cleanup() |
517 | { |
518 | // All of a nodes broadcasts and subscriptions are automatically torn down |
519 | // when it goes down, so a node has to stay up to keep its broadcast going. |
520 | |
521 | // Cancel subscriptions |
522 | for (unsigned int nChannel = 0; nChannel < vfSubscribe.size(); nChannel++) |
523 | if (vfSubscribe[nChannel]) |
524 | CancelSubscribe(nChannel); |
525 | } |
526 | |
527 | |
// Banned peer IPs mapped to their ban expiry time; guarded by cs_setBanned.
std::map<unsigned int, int64> CNode::setBanned;
CCriticalSection CNode::cs_setBanned;

// Drop all bans.  NOTE(review): clears without taking cs_setBanned --
// presumably only called when no other network threads run; confirm.
void CNode::ClearBanned()
{
    setBanned.clear();
}
535 | |
536 | bool CNode::IsBanned(unsigned int ip) |
537 | { |
538 | bool fResult = false; |
539 | CRITICAL_BLOCK(cs_setBanned) |
540 | { |
541 | std::map<unsigned int, int64>::iterator i = setBanned.find(ip); |
542 | if (i != setBanned.end()) |
543 | { |
544 | int64 t = (*i).second; |
545 | if (GetTime() < t) |
546 | fResult = true; |
547 | } |
548 | } |
549 | return fResult; |
550 | } |
551 | |
// Raise this peer's misbehavior score by howmuch.  Once the score
// reaches -banscore (default 100), ban the IP for -bantime seconds
// and disconnect.  Returns true if the peer was banned by this call.
// Local peers are never banned, only warned about.
bool CNode::Misbehaving(int howmuch)
{
    if (addr.IsLocal())
    {
        printf("Warning: local node %s misbehaving\n", addr.ToString().c_str());
        return false;
    }

    nMisbehavior += howmuch;
    if (nMisbehavior >= GetArg("-banscore", 100))
    {
        int64 banTime = GetTime()+GetArg("-bantime", 60*60*24);  // Default 24-hour ban
        // Never shorten an existing, longer ban
        CRITICAL_BLOCK(cs_setBanned)
            if (setBanned[addr.ip] < banTime)
                setBanned[addr.ip] = banTime;
        CloseSocketDisconnect();
        printf("Disconnected %s for misbehavior (score=%d)\n", addr.ToString().c_str(), nMisbehavior);
        return true;
    }
    return false;
}
573 | |
574 | |
575 | |
576 | |
577 | |
578 | |
579 | |
580 | |
581 | |
582 | |
583 | |
584 | |
// Thread entry point: runs ThreadSocketHandler2 while keeping
// vnThreadsRunning[0] accurate across normal exit and exceptions.
// Unlike the other thread wrappers, unknown exceptions are RETHROWN
// (not printed) so pthread_cancel() can unwind this thread.
void ThreadSocketHandler(void* parg)
{
    IMPLEMENT_RANDOMIZE_STACK(ThreadSocketHandler(parg));
    try
    {
        vnThreadsRunning[0]++;
        ThreadSocketHandler2(parg);
        vnThreadsRunning[0]--;
    }
    catch (std::exception& e) {
        vnThreadsRunning[0]--;
        PrintException(&e, "ThreadSocketHandler()");
    } catch (...) {
        vnThreadsRunning[0]--;
        throw; // support pthread_cancel()
    }
    printf("ThreadSocketHandler exiting\n");
}
603 | |
// Main network I/O loop: reaps disconnected nodes, select()s over all
// sockets, accepts inbound connections, moves bytes between sockets and
// each node's vRecv/vSend buffers, and enforces inactivity timeouts.
void ThreadSocketHandler2(void* parg)
{
    printf("ThreadSocketHandler started\n");
    list<CNode*> vNodesDisconnected;
    int nPrevNodeCount = 0;

    loop
    {
        //
        // Disconnect nodes
        //
        CRITICAL_BLOCK(cs_vNodes)
        {
            // Disconnect unused nodes
            vector<CNode*> vNodesCopy = vNodes;
            BOOST_FOREACH(CNode* pnode, vNodesCopy)
            {
                if (pnode->fDisconnect ||
                    (pnode->GetRefCount() <= 0 && pnode->vRecv.empty() && pnode->vSend.empty()))
                {
                    // remove from vNodes
                    vNodes.erase(remove(vNodes.begin(), vNodes.end(), pnode), vNodes.end());

                    // close socket and cleanup
                    pnode->CloseSocketDisconnect();
                    pnode->Cleanup();

                    // hold in disconnected pool until all refs are released
                    pnode->nReleaseTime = max(pnode->nReleaseTime, GetTime() + 15 * 60);
                    if (pnode->fNetworkNode || pnode->fInbound)
                        pnode->Release();
                    vNodesDisconnected.push_back(pnode);
                }
            }

            // Delete disconnected nodes
            list<CNode*> vNodesDisconnectedCopy = vNodesDisconnected;
            BOOST_FOREACH(CNode* pnode, vNodesDisconnectedCopy)
            {
                // wait until threads are done using it
                if (pnode->GetRefCount() <= 0)
                {
                    // Only delete if no other thread holds any of the
                    // node's locks (try-locks all fail otherwise).
                    bool fDelete = false;
                    TRY_CRITICAL_BLOCK(pnode->cs_vSend)
                     TRY_CRITICAL_BLOCK(pnode->cs_vRecv)
                      TRY_CRITICAL_BLOCK(pnode->cs_mapRequests)
                       TRY_CRITICAL_BLOCK(pnode->cs_inventory)
                        fDelete = true;
                    if (fDelete)
                    {
                        vNodesDisconnected.remove(pnode);
                        delete pnode;
                    }
                }
            }
        }
        // Repaint the UI when the connection count changes
        if (vNodes.size() != nPrevNodeCount)
        {
            nPrevNodeCount = vNodes.size();
            MainFrameRepaint();
        }


        //
        // Find which sockets have data to receive
        //
        struct timeval timeout;
        timeout.tv_sec = 0;
        timeout.tv_usec = 50000; // frequency to poll pnode->vSend

        fd_set fdsetRecv;
        fd_set fdsetSend;
        fd_set fdsetError;
        FD_ZERO(&fdsetRecv);
        FD_ZERO(&fdsetSend);
        FD_ZERO(&fdsetError);
        SOCKET hSocketMax = 0;

        if(hListenSocket != INVALID_SOCKET)
            FD_SET(hListenSocket, &fdsetRecv);
        hSocketMax = max(hSocketMax, hListenSocket);
        CRITICAL_BLOCK(cs_vNodes)
        {
            BOOST_FOREACH(CNode* pnode, vNodes)
            {
                if (pnode->hSocket == INVALID_SOCKET)
                    continue;
                FD_SET(pnode->hSocket, &fdsetRecv);
                FD_SET(pnode->hSocket, &fdsetError);
                hSocketMax = max(hSocketMax, pnode->hSocket);
                // Only watch for writability when there is data queued
                TRY_CRITICAL_BLOCK(pnode->cs_vSend)
                    if (!pnode->vSend.empty())
                        FD_SET(pnode->hSocket, &fdsetSend);
            }
        }

        // Not counted as "running" while blocked in select()
        vnThreadsRunning[0]--;
        int nSelect = select(hSocketMax + 1, &fdsetRecv, &fdsetSend, &fdsetError, &timeout);
        vnThreadsRunning[0]++;
        if (fShutdown)
            return;
        if (nSelect == SOCKET_ERROR)
        {
            // On error, treat every descriptor as readable so the recv
            // path below can surface and clean up the broken sockets.
            int nErr = WSAGetLastError();
            if (hSocketMax > -1)
            {
                printf("socket select error %d\n", nErr);
                for (int i = 0; i <= hSocketMax; i++)
                    FD_SET(i, &fdsetRecv);
            }
            FD_ZERO(&fdsetSend);
            FD_ZERO(&fdsetError);
            Sleep(timeout.tv_usec/1000);
        }


        //
        // Accept new connections
        //
        if (hListenSocket != INVALID_SOCKET && FD_ISSET(hListenSocket, &fdsetRecv))
        {
            struct sockaddr_in sockaddr;
            socklen_t len = sizeof(sockaddr);
            SOCKET hSocket = accept(hListenSocket, (struct sockaddr*)&sockaddr, &len);
            CAddress addr;
            int nInbound = 0;

            if (hSocket != INVALID_SOCKET)
                addr = CAddress(sockaddr);

            CRITICAL_BLOCK(cs_vNodes)
                BOOST_FOREACH(CNode* pnode, vNodes)
                    if (pnode->fInbound)
                        nInbound++;

            if (hSocket == INVALID_SOCKET)
            {
                if (WSAGetLastError() != WSAEWOULDBLOCK)
                    printf("socket error accept failed: %d\n", WSAGetLastError());
            }
            // Reserve MAX_OUTBOUND_CONNECTIONS slots for connections we initiate
            else if (nInbound >= GetArg("-maxconnections", 125) - MAX_OUTBOUND_CONNECTIONS)
            {
                closesocket(hSocket);
            }
            else if (CNode::IsBanned(addr.ip))
            {
                printf("connection from %s dropped (banned)\n", addr.ToString().c_str());
                closesocket(hSocket);
            }
            else
            {
                printf("accepted connection %s\n", addr.ToString().c_str());
                CNode* pnode = new CNode(hSocket, addr, true);
                pnode->AddRef();
                CRITICAL_BLOCK(cs_vNodes)
                    vNodes.push_back(pnode);
            }
        }


        //
        // Service each socket
        //
        // Snapshot vNodes with a ref on each node so we can iterate
        // without holding cs_vNodes.
        vector<CNode*> vNodesCopy;
        CRITICAL_BLOCK(cs_vNodes)
        {
            vNodesCopy = vNodes;
            BOOST_FOREACH(CNode* pnode, vNodesCopy)
                pnode->AddRef();
        }
        BOOST_FOREACH(CNode* pnode, vNodesCopy)
        {
            if (fShutdown)
                return;

            //
            // Receive
            //
            if (pnode->hSocket == INVALID_SOCKET)
                continue;
            if (FD_ISSET(pnode->hSocket, &fdsetRecv) || FD_ISSET(pnode->hSocket, &fdsetError))
            {
                TRY_CRITICAL_BLOCK(pnode->cs_vRecv)
                {
                    CDataStream& vRecv = pnode->vRecv;
                    unsigned int nPos = vRecv.size();

                    // Flood control: drop peers whose unprocessed input
                    // outgrows the receive buffer limit.
                    if (nPos > ReceiveBufferSize()) {
                        if (!pnode->fDisconnect)
                            printf("socket recv flood control disconnect (%d bytes)\n", vRecv.size());
                        pnode->CloseSocketDisconnect();
                    }
                    else {
                        // typical socket buffer is 8K-64K
                        char pchBuf[0x10000];
                        int nBytes = recv(pnode->hSocket, pchBuf, sizeof(pchBuf), MSG_DONTWAIT);
                        if (nBytes > 0)
                        {
                            vRecv.resize(nPos + nBytes);
                            memcpy(&vRecv[nPos], pchBuf, nBytes);
                            pnode->nLastRecv = GetTime();
                        }
                        else if (nBytes == 0)
                        {
                            // socket closed gracefully
                            if (!pnode->fDisconnect)
                                printf("socket closed\n");
                            pnode->CloseSocketDisconnect();
                        }
                        else if (nBytes < 0)
                        {
                            // error
                            int nErr = WSAGetLastError();
                            if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS)
                            {
                                if (!pnode->fDisconnect)
                                    printf("socket recv error %d\n", nErr);
                                pnode->CloseSocketDisconnect();
                            }
                        }
                    }
                }
            }

            //
            // Send
            //
            if (pnode->hSocket == INVALID_SOCKET)
                continue;
            if (FD_ISSET(pnode->hSocket, &fdsetSend))
            {
                TRY_CRITICAL_BLOCK(pnode->cs_vSend)
                {
                    CDataStream& vSend = pnode->vSend;
                    if (!vSend.empty())
                    {
                        int nBytes = send(pnode->hSocket, &vSend[0], vSend.size(), MSG_NOSIGNAL | MSG_DONTWAIT);
                        if (nBytes > 0)
                        {
                            vSend.erase(vSend.begin(), vSend.begin() + nBytes);
                            pnode->nLastSend = GetTime();
                        }
                        else if (nBytes < 0)
                        {
                            // error
                            int nErr = WSAGetLastError();
                            if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS)
                            {
                                printf("socket send error %d\n", nErr);
                                pnode->CloseSocketDisconnect();
                            }
                        }
                        // Flood control on the outbound side as well
                        if (vSend.size() > SendBufferSize()) {
                            if (!pnode->fDisconnect)
                                printf("socket send flood control disconnect (%d bytes)\n", vSend.size());
                            pnode->CloseSocketDisconnect();
                        }
                    }
                }
            }

            //
            // Inactivity checking
            //
            if (pnode->vSend.empty())
                pnode->nLastSendEmpty = GetTime();
            if (GetTime() - pnode->nTimeConnected > 60)
            {
                // No traffic either way in the first minute: give up
                if (pnode->nLastRecv == 0 || pnode->nLastSend == 0)
                {
                    printf("socket no message in first 60 seconds, %d %d\n", pnode->nLastRecv != 0, pnode->nLastSend != 0);
                    pnode->fDisconnect = true;
                }
                else if (GetTime() - pnode->nLastSend > 90*60 && GetTime() - pnode->nLastSendEmpty > 90*60)
                {
                    printf("socket not sending\n");
                    pnode->fDisconnect = true;
                }
                else if (GetTime() - pnode->nLastRecv > 90*60)
                {
                    printf("socket inactivity timeout\n");
                    pnode->fDisconnect = true;
                }
            }
        }
        // Drop the refs taken for this iteration
        CRITICAL_BLOCK(cs_vNodes)
        {
            BOOST_FOREACH(CNode* pnode, vNodesCopy)
                pnode->Release();
        }

        Sleep(10);
    }
}
898 | |
899 | |
// Thread entry point: runs ThreadOpenConnections2 while keeping
// vnThreadsRunning[1] accurate across normal exit and exceptions.
void ThreadOpenConnections(void* parg)
{
    IMPLEMENT_RANDOMIZE_STACK(ThreadOpenConnections(parg));
    try
    {
        vnThreadsRunning[1]++;
        ThreadOpenConnections2(parg);
        vnThreadsRunning[1]--;
    }
    catch (std::exception& e) {
        vnThreadsRunning[1]--;
        PrintException(&e, "ThreadOpenConnections()");
    } catch (...) {
        vnThreadsRunning[1]--;
        PrintException(NULL, "ThreadOpenConnections()");
    }
    printf("ThreadOpenConnections exiting\n");
}
918 | |
// Outbound connection manager.  With -connect it loops over only those
// addresses forever; otherwise it connects any -addnode peers once, then
// repeatedly picks the best candidate from mapAddresses (most recently
// seen, rate-limited by a retry-delay schedule) until the outbound limit
// is reached.
void ThreadOpenConnections2(void* parg)
{
    printf("ThreadOpenConnections started\n");

    // Connect to specific addresses
    if (mapArgs.count("-connect"))
    {
        // Never returns: -connect pins us to exactly these peers.
        for (int64 nLoop = 0;; nLoop++)
        {
            BOOST_FOREACH(string strAddr, mapMultiArgs["-connect"])
            {
                CAddress addr(strAddr);
                if (addr.IsValid())
                    OpenNetworkConnection(addr);
                // Back off reconnect attempts as nLoop grows (up to ~5s)
                for (int i = 0; i < 10 && i < nLoop; i++)
                {
                    Sleep(500);
                    if (fShutdown)
                        return;
                }
            }
        }
    }

    // Connect to manually added nodes first
    if (mapArgs.count("-addnode"))
    {
        BOOST_FOREACH(string strAddr, mapMultiArgs["-addnode"])
        {
            CAddress addr(strAddr);
            if (addr.IsValid())
            {
                OpenNetworkConnection(addr);
                Sleep(500);
                if (fShutdown)
                    return;
            }
        }
    }

    // Initiate network connections
    int64 nStart = GetTime();
    loop
    {
        // Decrement the running counter while sleeping so shutdown
        // accounting stays correct.
        vnThreadsRunning[1]--;
        Sleep(500);
        vnThreadsRunning[1]++;
        if (fShutdown)
            return;

        // Limit outbound connections
        loop
        {
            int nOutbound = 0;
            CRITICAL_BLOCK(cs_vNodes)
                BOOST_FOREACH(CNode* pnode, vNodes)
                    if (!pnode->fInbound)
                        nOutbound++;
            int nMaxOutboundConnections = MAX_OUTBOUND_CONNECTIONS;
            nMaxOutboundConnections = min(nMaxOutboundConnections, (int)GetArg("-maxconnections", 125));
            if (nOutbound < nMaxOutboundConnections)
                break;
            // At capacity: wait here until a slot frees up
            vnThreadsRunning[1]--;
            Sleep(2000);
            vnThreadsRunning[1]++;
            if (fShutdown)
                return;
        }

        //
        // Choose an address to connect to based on most recently seen
        //
        CAddress addrConnect;
        int64 nBest = INT64_MIN;

        // Only connect to one address per a.b.?.? range.
        // Do this here so we don't have to critsect vNodes inside mapAddresses critsect.
        set<unsigned int> setConnected;
        CRITICAL_BLOCK(cs_vNodes)
            BOOST_FOREACH(CNode* pnode, vNodes)
                setConnected.insert(pnode->addr.ip & 0x0000ffff);

        int64 nANow = GetAdjustedTime();

        CRITICAL_BLOCK(cs_mapAddresses)
        {
            BOOST_FOREACH(const PAIRTYPE(vector<unsigned char>, CAddress)& item, mapAddresses)
            {
                const CAddress& addr = item.second;
                if (!addr.IsIPv4() || !addr.IsValid() || setConnected.count(addr.ip & 0x0000ffff))
                    continue;
                int64 nSinceLastSeen = nANow - addr.nTime;
                int64 nSinceLastTry = nANow - addr.nLastTry;

                // Randomize the order in a deterministic way, putting the standard port first
                int64 nRandomizer = (uint64)(nStart * 4951 + addr.nLastTry * 9567851 + addr.ip * 7789) % (2 * 60 * 60);
                if (addr.port != htons(GetDefaultPort()))
                    nRandomizer += 2 * 60 * 60;

                // Last seen  Base retry frequency
                //   <1 hour   10 min
                //    1 hour    1 hour
                //    4 hours   2 hours
                //   24 hours   5 hours
                //   48 hours   7 hours
                //    7 days   13 hours
                //   30 days   27 hours
                //   90 days   46 hours
                //  365 days   93 hours
                int64 nDelay = (int64)(3600.0 * sqrt(fabs((double)nSinceLastSeen) / 3600.0) + nRandomizer);

                // Fast reconnect for one hour after last seen
                if (nSinceLastSeen < 60 * 60)
                    nDelay = 10 * 60;

                // Limit retry frequency
                if (nSinceLastTry < nDelay)
                    continue;

                // Only try the old stuff if we don't have enough connections
                if (vNodes.size() >= 8 && nSinceLastSeen > 24 * 60 * 60)
                    continue;

                // If multiple addresses are ready, prioritize by time since
                // last seen and time since last tried.
                int64 nScore = min(nSinceLastTry, (int64)24 * 60 * 60) - nSinceLastSeen - nRandomizer;
                if (nScore > nBest)
                {
                    nBest = nScore;
                    addrConnect = addr;
                }
            }
        }

        if (addrConnect.IsValid())
            OpenNetworkConnection(addrConnect);
    }
}
1057 | |
1058 | bool OpenNetworkConnection(const CAddress& addrConnect) |
1059 | { |
1060 | // |
1061 | // Initiate outbound network connection |
1062 | // |
1063 | if (fShutdown) |
1064 | return false; |
1065 | if (addrConnect.ip == addrLocalHost.ip || !addrConnect.IsIPv4() || |
1066 | FindNode(addrConnect.ip) || CNode::IsBanned(addrConnect.ip)) |
1067 | return false; |
1068 | |
1069 | vnThreadsRunning[1]--; |
1070 | CNode* pnode = ConnectNode(addrConnect); |
1071 | vnThreadsRunning[1]++; |
1072 | if (fShutdown) |
1073 | return false; |
1074 | if (!pnode) |
1075 | return false; |
1076 | pnode->fNetworkNode = true; |
1077 | |
1078 | return true; |
1079 | } |
1080 | |
1081 | |
1082 | |
1083 | |
1084 | |
1085 | |
1086 | |
1087 | |
// Thread entry point: wraps ThreadMessageHandler2 with the standard
// exception guard used by all network threads in this file, and keeps
// the vnThreadsRunning[2] counter (read by StopNode) accurate on both
// the normal and the exceptional exit paths.
void ThreadMessageHandler(void* parg)
{
    IMPLEMENT_RANDOMIZE_STACK(ThreadMessageHandler(parg));
    try
    {
        vnThreadsRunning[2]++;
        ThreadMessageHandler2(parg);
        vnThreadsRunning[2]--;
    }
    catch (std::exception& e) {
        // Counter must be decremented here too; the ++ above already ran.
        vnThreadsRunning[2]--;
        PrintException(&e, "ThreadMessageHandler()");
    } catch (...) {
        vnThreadsRunning[2]--;
        PrintException(NULL, "ThreadMessageHandler()");
    }
    printf("ThreadMessageHandler exiting\n");
}
1106 | |
// Main message-processing loop: repeatedly snapshots the node list,
// receives and sends messages for each node, then sleeps briefly to
// let messages bunch up.  Runs until fShutdown is set.
void ThreadMessageHandler2(void* parg)
{
    printf("ThreadMessageHandler started\n");
    SetThreadPriority(THREAD_PRIORITY_BELOW_NORMAL);
    while (!fShutdown)
    {
        // Snapshot vNodes under the lock and take a reference on each
        // node so none is deleted while we work on the copy unlocked.
        vector<CNode*> vNodesCopy;
        CRITICAL_BLOCK(cs_vNodes)
        {
            vNodesCopy = vNodes;
            BOOST_FOREACH(CNode* pnode, vNodesCopy)
                pnode->AddRef();
        }

        // Poll the connected nodes for messages
        // One randomly chosen node per pass gets trickle treatment in
        // SendMessages (second argument below).
        CNode* pnodeTrickle = NULL;
        if (!vNodesCopy.empty())
            pnodeTrickle = vNodesCopy[GetRand(vNodesCopy.size())];
        BOOST_FOREACH(CNode* pnode, vNodesCopy)
        {
            // Receive messages
            // TRY_CRITICAL_BLOCK skips the node if another thread holds
            // its lock, rather than blocking the whole loop.
            TRY_CRITICAL_BLOCK(pnode->cs_vRecv)
                ProcessMessages(pnode);
            if (fShutdown)
                return;

            // Send messages
            TRY_CRITICAL_BLOCK(pnode->cs_vSend)
                SendMessages(pnode, pnode == pnodeTrickle);
            if (fShutdown)
                return;
            // NOTE(review): the early returns above exit without the
            // Release() pass below, leaking one reference per node on
            // shutdown — harmless at process exit, but confirm intended.
        }

        // Drop the references taken at the top of the pass.
        CRITICAL_BLOCK(cs_vNodes)
        {
            BOOST_FOREACH(CNode* pnode, vNodesCopy)
                pnode->Release();
        }

        // Wait and allow messages to bunch up.
        // Reduce vnThreadsRunning so StopNode has permission to exit while
        // we're sleeping, but we must always check fShutdown after doing this.
        vnThreadsRunning[2]--;
        Sleep(100);
        if (fRequestShutdown)
            Shutdown(NULL);
        vnThreadsRunning[2]++;
        if (fShutdown)
            return;
    }
}
1158 | |
1159 | |
1160 | |
1161 | |
1162 | |
1163 | |
1164 | bool BindListenPort(string& strError) |
1165 | { |
1166 | strError = ""; |
1167 | int nOne = 1; |
1168 | addrLocalHost.port = htons(GetListenPort()); |
1169 | |
1170 | // Create socket for listening for incoming connections |
1171 | hListenSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); |
1172 | if (hListenSocket == INVALID_SOCKET) |
1173 | { |
1174 | strError = strprintf("Error: Couldn't open socket for incoming connections (socket returned error %d)", WSAGetLastError()); |
1175 | printf("%s\n", strError.c_str()); |
1176 | return false; |
1177 | } |
1178 | |
1179 | #ifdef SO_NOSIGPIPE |
1180 | // Different way of disabling SIGPIPE on BSD |
1181 | setsockopt(hListenSocket, SOL_SOCKET, SO_NOSIGPIPE, (void*)&nOne, sizeof(int)); |
1182 | #endif |
1183 | |
1184 | // Allow binding if the port is still in TIME_WAIT state after |
1185 | // the program was closed and restarted. Not an issue on windows. |
1186 | setsockopt(hListenSocket, SOL_SOCKET, SO_REUSEADDR, (void*)&nOne, sizeof(int)); |
1187 | |
1188 | if (fcntl(hListenSocket, F_SETFL, O_NONBLOCK) == SOCKET_ERROR) |
1189 | { |
1190 | strError = strprintf("Error: Couldn't set properties on socket for incoming connections (error %d)", WSAGetLastError()); |
1191 | printf("%s\n", strError.c_str()); |
1192 | return false; |
1193 | } |
1194 | |
1195 | // The sockaddr_in structure specifies the address family, |
1196 | // IP address, and port for the socket that is being bound |
1197 | struct sockaddr_in sockaddr; |
1198 | memset(&sockaddr, 0, sizeof(sockaddr)); |
1199 | sockaddr.sin_family = AF_INET; |
1200 | sockaddr.sin_addr.s_addr = INADDR_ANY; // bind to all IPs on this computer |
1201 | sockaddr.sin_port = htons(GetListenPort()); |
1202 | if (::bind(hListenSocket, (struct sockaddr*)&sockaddr, sizeof(sockaddr)) == SOCKET_ERROR) |
1203 | { |
1204 | int nErr = WSAGetLastError(); |
1205 | if (nErr == WSAEADDRINUSE) |
1206 | strError = strprintf(_("Unable to bind to port %d on this computer. Bitcoin is probably already running."), ntohs(sockaddr.sin_port)); |
1207 | else |
1208 | strError = strprintf("Error: Unable to bind to port %d on this computer (bind returned error %d)", ntohs(sockaddr.sin_port), nErr); |
1209 | printf("%s\n", strError.c_str()); |
1210 | return false; |
1211 | } |
1212 | printf("Bound to port %d\n", ntohs(sockaddr.sin_port)); |
1213 | |
1214 | // Listen for incoming connections |
1215 | if (listen(hListenSocket, SOMAXCONN) == SOCKET_ERROR) |
1216 | { |
1217 | strError = strprintf("Error: Listening for incoming connections failed (listen returned error %d)", WSAGetLastError()); |
1218 | printf("%s\n", strError.c_str()); |
1219 | return false; |
1220 | } |
1221 | |
1222 | return true; |
1223 | } |
1224 | |
// Discover the local address, decide whether we can accept incoming
// connections, and launch the network worker threads plus the miner.
void StartNode(void* parg)
{
    if (pnodeLocalHost == NULL)
        pnodeLocalHost = new CNode(INVALID_SOCKET, CAddress("127.0.0.1", 0, nLocalServices));

    // Get local host ip
    struct ifaddrs* myaddrs;
    if (getifaddrs(&myaddrs) == 0)
    {
        for (struct ifaddrs* ifa = myaddrs; ifa != NULL; ifa = ifa->ifa_next)
        {
            // Skip interfaces with no address, interfaces that are down,
            // and the loopback interface (Linux "lo", BSD/macOS "lo0").
            if (ifa->ifa_addr == NULL) continue;
            if ((ifa->ifa_flags & IFF_UP) == 0) continue;
            if (strcmp(ifa->ifa_name, "lo") == 0) continue;
            if (strcmp(ifa->ifa_name, "lo0") == 0) continue;
            char pszIP[100];
            if (ifa->ifa_addr->sa_family == AF_INET)
            {
                struct sockaddr_in* s4 = (struct sockaddr_in*)(ifa->ifa_addr);
                if (inet_ntop(ifa->ifa_addr->sa_family, (void*)&(s4->sin_addr), pszIP, sizeof(pszIP)) != NULL)
                    printf("ipv4 %s: %s\n", ifa->ifa_name, pszIP);

                // Take the first IP that isn't loopback 127.x.x.x
                CAddress addr(*(unsigned int*)&s4->sin_addr, GetListenPort(), nLocalServices);
                if (addr.IsValid() && addr.GetByte(3) != 127)
                {
                    addrLocalHost = addr;
                    break;
                }
            }
            else if (ifa->ifa_addr->sa_family == AF_INET6)
            {
                // IPv6 addresses are only logged, not used for addrLocalHost.
                struct sockaddr_in6* s6 = (struct sockaddr_in6*)(ifa->ifa_addr);
                if (inet_ntop(ifa->ifa_addr->sa_family, (void*)&(s6->sin6_addr), pszIP, sizeof(pszIP)) != NULL)
                    printf("ipv6 %s: %s\n", ifa->ifa_name, pszIP);
            }
        }
        freeifaddrs(myaddrs);
    }

    printf("addrLocalHost = %s\n", addrLocalHost.ToString().c_str());

    if (fUseProxy || mapArgs.count("-connect") || fNoListen)
    {
        // Proxies can't take incoming connections
        addrLocalHost.ip = CAddress("0.0.0.0").ip;
    }
    else
    {
        // NOTE(review): this branch unconditionally overrides the address
        // found above with -myip and throws if it is missing/invalid, so
        // -myip appears mandatory when listening — confirm this is intended.
        // (mapArgs["-myip"] inserts an empty string when absent, which
        // yields an invalid CAddress and triggers the throw.)
        addrLocalHost.ip = CAddress(mapArgs["-myip"]).ip;
        if (!addrLocalHost.IsValid())
            throw runtime_error(strprintf(_("You must set myip=<ipaddress> on the command line or in the configuration file:\n%s\n"
                "If the file does not exist, create it with owner-readable-only file permissions."),
                    GetConfigFile().c_str()));
    }

    printf("addrLocalHost = %s\n", addrLocalHost.ToString().c_str());

    //
    // Start threads
    //

    // Send and receive from sockets, accept connections
    if (!CreateThread(ThreadSocketHandler, NULL))
        printf("Error: CreateThread(ThreadSocketHandler) failed\n");

    // Initiate outbound connections
    if (!CreateThread(ThreadOpenConnections, NULL))
        printf("Error: CreateThread(ThreadOpenConnections) failed\n");

    // Process messages
    if (!CreateThread(ThreadMessageHandler, NULL))
        printf("Error: CreateThread(ThreadMessageHandler) failed\n");

    // Generate coins in the background
    GenerateBitcoins(fGenerateBitcoins, pwalletMain);
}
1302 | |
1303 | bool StopNode() |
1304 | { |
1305 | printf("StopNode()\n"); |
1306 | fShutdown = true; |
1307 | nTransactionsUpdated++; |
1308 | int64 nStart = GetTime(); |
1309 | while (vnThreadsRunning[0] > 0 || vnThreadsRunning[1] > 0 || vnThreadsRunning[2] > 0 || vnThreadsRunning[3] > 0 || vnThreadsRunning[4] > 0 |
1310 | ) |
1311 | { |
1312 | if (GetTime() - nStart > 20) |
1313 | break; |
1314 | Sleep(20); |
1315 | } |
1316 | if (vnThreadsRunning[0] > 0) printf("ThreadSocketHandler still running\n"); |
1317 | if (vnThreadsRunning[1] > 0) printf("ThreadOpenConnections still running\n"); |
1318 | if (vnThreadsRunning[2] > 0) printf("ThreadMessageHandler still running\n"); |
1319 | if (vnThreadsRunning[3] > 0) printf("ThreadBitcoinMiner still running\n"); |
1320 | if (vnThreadsRunning[4] > 0) printf("ThreadRPCServer still running\n"); |
1321 | while (vnThreadsRunning[2] > 0 || vnThreadsRunning[4] > 0) |
1322 | Sleep(20); |
1323 | Sleep(50); |
1324 | |
1325 | return true; |
1326 | } |
1327 | |
1328 | class CNetCleanup |
1329 | { |
1330 | public: |
1331 | CNetCleanup() |
1332 | { |
1333 | } |
1334 | ~CNetCleanup() |
1335 | { |
1336 | // Close sockets |
1337 | BOOST_FOREACH(CNode* pnode, vNodes) |
1338 | if (pnode->hSocket != INVALID_SOCKET) |
1339 | closesocket(pnode->hSocket); |
1340 | if (hListenSocket != INVALID_SOCKET) |
1341 | if (closesocket(hListenSocket) == SOCKET_ERROR) |
1342 | printf("closesocket(hListenSocket) failed with error %d\n", WSAGetLastError()); |
1343 | |
1344 | } |
1345 | } |
1346 | instance_of_cnetcleanup; |