1 from __future__ import division
8 from twisted.internet import defer, protocol, reactor
9 from twisted.python import failure, log
12 from p2pool import data as p2pool_data
13 from p2pool.bitcoin import data as bitcoin_data
14 from p2pool.util import deferral, p2protocol, pack, variable
# Raised when a remote peer violates the protocol (e.g. wrong message order,
# duplicate/unknown transaction references). Caught in packetReceived(), which
# responds by calling badPeerHappened() (ban + disconnect).
# NOTE(review): the class body (original line 17+) is elided from this excerpt.
16 class PeerMisbehavingError(Exception):
# fragment(f, **kwargs): invoke packet-sender f with list-valued keyword args.
# If the serialized packet is too large (p2protocol.TooLong), recursively split
# every list argument in half and send each half separately.
# NOTE(review): the try/f(**kwargs) lines (originals 21-22) are elided here.
20 def fragment(f, **kwargs):
23 except p2protocol.TooLong:
# Each keyword list is halved at its own midpoint; recursion bottoms out when
# the pieces fit within the packet size limit.
24 fragment(f, **dict((k, v[:len(v)//2]) for k, v in kwargs.iteritems()))
25 fragment(f, **dict((k, v[len(v)//2:]) for k, v in kwargs.iteritems()))
# Protocol: one connection to a remote p2pool peer. Extends p2protocol.Protocol
# with the version handshake, address gossip, share relaying, and per-peer
# transaction bookkeeping.
# NOTE(review): several original lines are elided in this excerpt (e.g. the
# VERSION constant and the `self.node = node` assignment around line 34).
27 class Protocol(p2protocol.Protocol):
# Approximate byte cap on transaction data remembered on behalf of a peer;
# enforced by the asserts / PeerMisbehavingError in the tx handlers below.
30 max_remembered_txs_size = 2500000
# node: owning Node instance; incoming: True when the peer connected to us.
32 def __init__(self, node, incoming):
# 1000000 is the maximum packet payload length passed to the base protocol.
33 p2protocol.Protocol.__init__(self, node.net.PREFIX, 1000000, node.traffic_happened)
35 self.incoming = incoming
# Stays None until the peer's version message is handled (see handle_version).
37 self.other_version = None
# Set True only after a successful version handshake; gates packetReceived().
38 self.connected2 = False
# Called by Twisted when the TCP connection is established: registers with the
# factory, sends our version message, starts the handshake timeout, and
# initializes per-peer transaction-relay state.
# NOTE(review): the send_version(...) call wrapping lines 52-63 and several
# other lines are elided from this excerpt.
40 def connectionMade(self):
41 self.factory.proto_made_connection(self)
43 self.connection_lost_event = variable.Event()
45 self.addr = self.transport.getPeer().host, self.transport.getPeer().port
# addr_to / addr_from fields of our outgoing version message.
52 address=self.transport.getPeer().host,
53 port=self.transport.getPeer().port,
57 address=self.transport.getHost().host,
58 port=self.transport.getHost().port,
# Random per-node nonce: used to detect self-connections and duplicates.
60 nonce=self.node.nonce,
61 sub_version=p2pool.__version__,
63 best_share_hash=self.node.best_share_hash_func(),
# Peer has 10 seconds to complete the version handshake.
66 self.timeout_delayed = reactor.callLater(10, self._connect_timeout)
# Matches outgoing sharereq ids to incoming sharereply packets; a timed-out
# request disconnects the peer.
68 self.get_shares = deferral.GenericDeferrer(
70 func=lambda id, hashes, parents, stops: self.send_sharereq(id=id, hashes=hashes, parents=parents, stops=stops),
72 on_timeout=self.disconnect,
75 self.remote_tx_hashes = set() # view of peer's known_txs # not actually initially empty, but sending txs instead of tx hashes won't hurt
76 self.remote_remembered_txs_size = 0
78 self.remembered_txs = {} # view of peer's mining_txs
79 self.remembered_txs_size = 0
# Short-lived cache of txs we recently dropped, keyed by a counter; lets the
# peer still reference them despite losing_tx packet latency (see line 171).
80 self.known_txs_cache = {}
# Fired when the 10s handshake timer expires before a version message arrives.
# NOTE(review): the disconnect call (original line 85) is elided here.
82 def _connect_timeout(self):
83 self.timeout_delayed = None
84 print 'Handshake timed out, disconnecting from %s:%i' % self.addr
# Dispatch incoming packets, enforcing that the very first message must be
# 'version'; any PeerMisbehavingError results in a ban via badPeerHappened().
# NOTE(review): the try: line (original 88) is elided from this excerpt.
87 def packetReceived(self, command, payload2):
89 if command != 'version' and not self.connected2:
90 raise PeerMisbehavingError('first message was not version message')
91 p2protocol.Protocol.packetReceived(self, command, payload2)
# Python 2 exception syntax; e.message is deprecated even in py2.6+ (works,
# but e.args[0] or str(e) would be the modern spelling).
92 except PeerMisbehavingError, e:
93 print 'Peer %s:%i misbehaving, will drop and ban. Reason:' % self.addr, e.message
94 self.badPeerHappened()
# Ban the peer's host for one hour (never localhost) and drop the connection.
# NOTE(review): intervening lines (97, 99, 102-103) are elided; lines 104-105
# below belong to _timeout(), whose `def` line is not visible in this excerpt.
96 def badPeerHappened(self):
98 print "Bad peer banned:", self.addr
100 if self.transport.getPeer().host != '127.0.0.1': # never ban localhost
# Ban bookkeeping lives on the Node: host -> expiry unix time.
101 self.node.bans[self.transport.getPeer().host] = time.time() + 60*60
# _timeout(): post-handshake inactivity timeout (armed for 100s in
# handle_version); clears the handle and disconnects (disconnect call elided).
104 self.timeout_delayed = None
105 print 'Connection timed out, disconnecting from %s:%i' % self.addr
# Wire format of the 'version' handshake message (see handle_version below).
# NOTE(review): the closing `])` (original line 117) is elided in this excerpt.
108 message_version = pack.ComposedType([
109 ('version', pack.IntType(32)),
110 ('services', pack.IntType(64)),
111 ('addr_to', bitcoin_data.address_type),
112 ('addr_from', bitcoin_data.address_type),
# Random 64-bit id of the sending node; used for self/duplicate detection.
113 ('nonce', pack.IntType(64)),
114 ('sub_version', pack.VarStrType()),
115 ('mode', pack.IntType(32)), # always 1 for legacy compatibility
# 0 encodes None (no best share yet).
116 ('best_share_hash', pack.PossiblyNoneType(0, pack.IntType(256))),
# Complete the handshake: validate the peer's version message, switch to the
# long (100s) inactivity timeout, start periodic ping/addrme loops, and hook
# watchers that keep the peer's view of our known/mining transactions in sync.
# NOTE(review): several lines are elided in this excerpt (e.g. the minimum
# version check before line 122, the duplicate-connection disconnect, and the
# ping call inside the run_repeatedly lambda at line 151).
118 def handle_version(self, version, services, addr_to, addr_from, nonce, sub_version, mode, best_share_hash):
119 if self.other_version is not None:
120 raise PeerMisbehavingError('more than one version message')
122 raise PeerMisbehavingError('peer too old')
124 self.other_version = version
# Cap stored sub_version string at 512 chars (peer-supplied data).
125 self.other_sub_version = sub_version[:512]
126 self.other_services = services
# Same nonce as ours means we connected to ourselves.
128 if nonce == self.node.nonce:
129 raise PeerMisbehavingError('was connected to self')
# Nonce already in peers -> second connection to the same node.
130 if nonce in self.node.peers:
132 print 'Detected duplicate connection, disconnecting from %s:%i' % self.addr
137 self.connected2 = True
# Replace the 10s handshake timer with the 100s inactivity timer.
139 self.timeout_delayed.cancel()
140 self.timeout_delayed = reactor.callLater(100, self._timeout)
# Wrap dataReceived so ANY traffic from the peer resets the inactivity timer.
142 old_dataReceived = self.dataReceived
143 def new_dataReceived(data):
144 if self.timeout_delayed is not None:
145 self.timeout_delayed.reset(100)
146 old_dataReceived(data)
147 self.dataReceived = new_dataReceived
149 self.factory.proto_connected(self)
# Periodic keepalive loop; run_repeatedly re-schedules using the returned
# delay (exponentially distributed, mean 100s).
151 self._stop_thread = deferral.run_repeatedly(lambda: [
153 random.expovariate(1/100)][-1])
# Optionally advertise our own listening address to the peer, less often the
# more peers we have (mean 100*(len(peers)+?) seconds).
155 if self.node.advertise_ip:
156 self._stop_thread2 = deferral.run_repeatedly(lambda: [
157 self.send_addrme(port=self.node.serverfactory.listen_port.getHost().port) if self.node.serverfactory.listen_port is not None else None,
158 random.expovariate(1/(100*len(self.node.peers) + 1))][-1])
160 if best_share_hash is not None:
161 self.node.handle_share_hashes([best_share_hash], self)
# Watcher: whenever our known_txs set changes, tell the peer which tx hashes
# we gained (have_tx) and lost (losing_tx).
163 def update_remote_view_of_my_known_txs(before, after):
164 added = set(after) - set(before)
165 removed = set(before) - set(after)
167 self.send_have_tx(tx_hashes=list(added))
169 self.send_losing_tx(tx_hashes=list(removed))
171 # cache forgotten txs here for a little while so latency of "losing_tx" packets doesn't cause problems
172 key = max(self.known_txs_cache) + 1 if self.known_txs_cache else 0
173 self.known_txs_cache[key] = dict((h, before[h]) for h in removed)
# Evict the cached batch after 20 seconds.
174 reactor.callLater(20, self.known_txs_cache.pop, key)
175 watch_id = self.node.known_txs_var.transitioned.watch(update_remote_view_of_my_known_txs)
# Unhook the watcher when this connection dies.
176 self.connection_lost_event.watch(lambda: self.node.known_txs_var.transitioned.unwatch(watch_id))
# Prime the peer with everything we currently know.
178 self.send_have_tx(tx_hashes=self.node.known_txs_var.value.keys())
# Watcher: mirror our mining_txs into the peer's remembered set, tracking the
# (approximate, 100-byte-overhead) size so we never exceed the peer's cap.
180 def update_remote_view_of_my_mining_txs(before, after):
181 added = set(after) - set(before)
182 removed = set(before) - set(after)
184 self.remote_remembered_txs_size += sum(100 + bitcoin_data.tx_type.packed_size(after[x]) for x in added)
185 assert self.remote_remembered_txs_size <= self.max_remembered_txs_size
# Send only hashes for txs the peer already knows, full txs otherwise.
186 fragment(self.send_remember_tx, tx_hashes=[x for x in added if x in self.remote_tx_hashes], txs=[after[x] for x in added if x not in self.remote_tx_hashes])
188 self.send_forget_tx(tx_hashes=list(removed))
189 self.remote_remembered_txs_size -= sum(100 + bitcoin_data.tx_type.packed_size(before[x]) for x in removed)
190 watch_id2 = self.node.mining_txs_var.transitioned.watch(update_remote_view_of_my_mining_txs)
191 self.connection_lost_event.watch(lambda: self.node.mining_txs_var.transitioned.unwatch(watch_id2))
# Prime the peer with the current mining transactions (full txs).
193 self.remote_remembered_txs_size += sum(100 + bitcoin_data.tx_type.packed_size(x) for x in self.node.mining_txs_var.value.values())
194 assert self.remote_remembered_txs_size <= self.max_remembered_txs_size
195 fragment(self.send_remember_tx, tx_hashes=[], txs=self.node.mining_txs_var.value.values())
# 'ping' carries no payload; the reply (original line 199) is elided here.
197 message_ping = pack.ComposedType([])
198 def handle_ping(self):
# 'addrme': peer asks us to advertise its (host, listening-port) pair.
201 message_addrme = pack.ComposedType([
202 ('port', pack.IntType(16)),
204 def handle_addrme(self, port):
205 host = self.transport.getPeer().host
206 #print 'addrme from', host, port
# A localhost peer has no routable address: just forward the addrme to a
# random peer instead of recording it.
207 if host == '127.0.0.1':
208 if random.random() < .8 and self.node.peers:
209 random.choice(self.node.peers.values()).send_addrme(port=port) # services...
# Otherwise record the address ourselves and gossip it onward with
# probability 0.8.
211 self.node.got_addr((self.transport.getPeer().host, port), self.other_services, int(time.time()))
212 if random.random() < .8 and self.node.peers:
213 random.choice(self.node.peers.values()).send_addrs(addrs=[
216 services=self.other_services,
220 timestamp=int(time.time()),
# 'addrs': gossip of known peer addresses with last-seen timestamps.
224 message_addrs = pack.ComposedType([
225 ('addrs', pack.ListType(pack.ComposedType([
226 ('timestamp', pack.IntType(64)),
227 ('address', bitcoin_data.address_type),
230 def handle_addrs(self, addrs):
231 for addr_record in addrs:
# Clamp the advertised timestamp to now so peers can't post-date entries.
232 self.node.got_addr((addr_record['address']['address'], addr_record['address']['port']), addr_record['address']['services'], min(int(time.time()), addr_record['timestamp']))
# Re-gossip each record to one random peer with probability 0.8.
233 if random.random() < .8 and self.node.peers:
234 random.choice(self.node.peers.values()).send_addrs(addrs=[addr_record])
# 'getaddrs': peer requests up to `count` good addresses from our store.
236 message_getaddrs = pack.ComposedType([
237 ('count', pack.IntType(32)),
# NOTE(review): the count-capping lines (originals 240-241) and parts of the
# reply construction are elided from this excerpt.
239 def handle_getaddrs(self, count):
242 self.send_addrs(addrs=[
# addr_store values are (services, first_seen, last_seen) tuples.
244 timestamp=int(self.node.addr_store[host, port][2]),
246 services=self.node.addr_store[host, port][0],
251 self.node.get_good_peers(count)
# 'shares': batch of serialized shares from the peer.
254 message_shares = pack.ComposedType([
255 ('shares', pack.ListType(p2pool_data.share_type)),
# Deserialize each share, resolve its new_transaction_hashes against our
# known_txs (falling back to the latency cache), and hand the (share, txs)
# pairs to the node. NOTE(review): `result`/`txs` initialization and the
# cache-hit/disconnect control flow lines are elided from this excerpt.
257 def handle_shares(self, shares):
259 for wrappedshare in shares:
# Ignore shares older than the current minimum supported version.
260 if wrappedshare['type'] < p2pool_data.Share.VERSION: continue
261 share = p2pool_data.load_share(wrappedshare, self.node.net, self.addr)
# Type >= 13 shares reference transactions by hash and need resolution.
262 if wrappedshare['type'] >= 13:
264 for tx_hash in share.share_info['new_transaction_hashes']:
265 if tx_hash in self.node.known_txs_var.value:
266 tx = self.node.known_txs_var.value[tx_hash]
# Not in known_txs: check recently-forgotten txs (peer may not have
# processed our losing_tx yet).
268 for cache in self.known_txs_cache.itervalues():
271 print 'Transaction %064x rescued from peer latency cache!' % (tx_hash,)
# Truly unknown reference is a protocol violation -> disconnect.
274 print >>sys.stderr, 'Peer referenced unknown transaction %064x, disconnecting' % (tx_hash,)
281 result.append((share, txs))
283 self.node.handle_shares(result, self)
# Push shares to this peer, first ensuring it remembers every transaction the
# shares reference (remember_tx), then sending the shares, then releasing the
# temporary txs (forget_tx).
# NOTE(review): include_txs_with=[] is a mutable default argument; it appears
# to be read-only here so it is harmless, but worth confirming/cleaning up.
# NOTE(review): the tx_hashes initialization and the loop header over `shares`
# (originals 286-287) are elided from this excerpt.
285 def sendShares(self, shares, tracker, known_txs, include_txs_with=[]):
288 if share.VERSION >= 13:
289 # send full transaction for every new_transaction_hash that peer does not know
290 for tx_hash in share.share_info['new_transaction_hashes']:
291 assert tx_hash in known_txs, 'tried to broadcast share without knowing all its new transactions'
292 if tx_hash not in self.remote_tx_hashes:
293 tx_hashes.add(tx_hash)
# For selected shares also include their other (non-new) transactions.
295 if share.hash in include_txs_with:
296 x = share.get_other_tx_hashes(tracker)
# Skip txs the peer's mining_txs watcher already keeps remembered for it.
300 hashes_to_send = [x for x in tx_hashes if x not in self.node.mining_txs_var.value and x in known_txs]
# Refuse rather than blow the peer's remembered-txs budget.
302 new_remote_remembered_txs_size = self.remote_remembered_txs_size + sum(100 + bitcoin_data.tx_type.packed_size(known_txs[x]) for x in hashes_to_send)
303 if new_remote_remembered_txs_size > self.max_remembered_txs_size:
304 raise ValueError('shares have too many txs')
305 self.remote_remembered_txs_size = new_remote_remembered_txs_size
# Hashes for txs the peer knows, full txs otherwise; fragment() splits
# oversized packets.
307 fragment(self.send_remember_tx, tx_hashes=[x for x in hashes_to_send if x in self.remote_tx_hashes], txs=[known_txs[x] for x in hashes_to_send if x not in self.remote_tx_hashes])
309 fragment(self.send_shares, shares=[share.as_share() for share in shares])
# Release the temporarily remembered txs and restore the size accounting.
311 self.send_forget_tx(tx_hashes=hashes_to_send)
313 self.remote_remembered_txs_size -= sum(100 + bitcoin_data.tx_type.packed_size(known_txs[x]) for x in hashes_to_send)
# 'sharereq': request shares by hash, with ancestry (parents) and stop hashes;
# `id` correlates the asynchronous 'sharereply'.
316 message_sharereq = pack.ComposedType([
317 ('id', pack.IntType(256)),
318 ('hashes', pack.ListType(pack.IntType(256))),
319 ('parents', pack.VarIntType()),
320 ('stops', pack.ListType(pack.IntType(256))),
# Serve a share request; if the reply would exceed the packet size limit,
# report 'too long' instead. NOTE(review): the try: line (original 324) is
# elided from this excerpt.
322 def handle_sharereq(self, id, hashes, parents, stops):
323 shares = self.node.handle_get_shares(hashes, parents, stops, self)
325 self.send_sharereply(id=id, result='good', shares=[share.as_share() for share in shares])
326 except p2protocol.TooLong:
327 self.send_sharereply(id=id, result='too long', shares=[])
329 message_sharereply = pack.ComposedType([
330 ('id', pack.IntType(256)),
# Enum of reply outcomes; unk2-unk6 are reserved/unused codes.
331 ('result', pack.EnumType(pack.VarIntType(), {0: 'good', 1: 'too long', 2: 'unk2', 3: 'unk3', 4: 'unk4', 5: 'unk5', 6: 'unk6'})),
332 ('shares', pack.ListType(p2pool_data.share_type)),
# Raised (wrapped in a Failure) to reject the pending get_shares deferred when
# the peer reports a non-'good' result.
334 class ShareReplyError(Exception): pass
# Resolve the pending get_shares request matching `id` with either the loaded
# shares or a Failure. NOTE(review): the result=='good' branch line (original
# 336) is elided from this excerpt.
335 def handle_sharereply(self, id, result, shares):
337 res = [p2pool_data.load_share(share, self.node.net, self.addr) for share in shares if share['type'] >= p2pool_data.Share.VERSION]
339 res = failure.Failure(self.ShareReplyError(result))
340 self.get_shares.got_response(id, res)
# 'bestblock': peer advertises its best known bitcoin block header.
343 message_bestblock = pack.ComposedType([
344 ('header', bitcoin_data.block_header_type),
346 def handle_bestblock(self, header):
347 self.node.handle_bestblock(header, self)
# 'have_tx': peer announces tx hashes it knows about.
350 message_have_tx = pack.ComposedType([
351 ('tx_hashes', pack.ListType(pack.IntType(256))),
353 def handle_have_tx(self, tx_hashes):
354 #assert self.remote_tx_hashes.isdisjoint(tx_hashes)
355 self.remote_tx_hashes.update(tx_hashes)
# Bound memory: evict arbitrary entries (set.pop) beyond 10000 hashes. Worst
# case we resend a full tx the peer actually knew - wasteful but correct.
356 while len(self.remote_tx_hashes) > 10000:
357 self.remote_tx_hashes.pop()
# 'losing_tx': peer announces tx hashes it no longer knows.
358 message_losing_tx = pack.ComposedType([
359 ('tx_hashes', pack.ListType(pack.IntType(256))),
361 def handle_losing_tx(self, tx_hashes):
362 #assert self.remote_tx_hashes.issuperset(tx_hashes)
363 self.remote_tx_hashes.difference_update(tx_hashes)
# 'remember_tx': peer asks us to hold transactions (by hash for ones we know,
# or as full txs) so upcoming shares can reference them.
366 message_remember_tx = pack.ComposedType([
367 ('tx_hashes', pack.ListType(pack.IntType(256))),
368 ('txs', pack.ListType(bitcoin_data.tx_type)),
# Resolve each referenced hash from known_txs or the latency cache, store the
# full txs, publish newly-seen ones into node.known_txs_var, and enforce the
# per-peer size cap. NOTE(review): several control-flow lines (disconnect
# calls, the `for tx in txs:` header, `warned` init, cache-hit break) are
# elided from this excerpt.
370 def handle_remember_tx(self, tx_hashes, txs):
371 for tx_hash in tx_hashes:
# Remembering the same hash twice is a protocol violation.
372 if tx_hash in self.remembered_txs:
373 print >>sys.stderr, 'Peer referenced transaction twice, disconnecting'
377 if tx_hash in self.node.known_txs_var.value:
378 tx = self.node.known_txs_var.value[tx_hash]
# Fall back to recently-forgotten txs (losing_tx latency window).
380 for cache in self.known_txs_cache.itervalues():
383 print 'Transaction %064x rescued from peer latency cache!' % (tx_hash,)
# Hash found nowhere -> disconnect.
386 print >>sys.stderr, 'Peer referenced unknown transaction %064x, disconnecting' % (tx_hash,)
# 100-byte overhead term matches the sender-side accounting above.
390 self.remembered_txs[tx_hash] = tx
391 self.remembered_txs_size += 100 + bitcoin_data.tx_type.packed_size(tx)
392 new_known_txs = dict(self.node.known_txs_var.value)
# Full transactions: hash them ourselves; never trust a peer-supplied hash.
395 tx_hash = bitcoin_data.hash256(bitcoin_data.tx_type.pack(tx))
396 if tx_hash in self.remembered_txs:
397 print >>sys.stderr, 'Peer referenced transaction twice, disconnecting'
# Only warn once per batch about redundant full-tx sends.
401 if tx_hash in self.node.known_txs_var.value and not warned:
402 print 'Peer sent entire transaction %064x that was already received' % (tx_hash,)
405 self.remembered_txs[tx_hash] = tx
406 self.remembered_txs_size += 100 + bitcoin_data.tx_type.packed_size(tx)
407 new_known_txs[tx_hash] = tx
# Single atomic publish of all newly learned txs.
408 self.node.known_txs_var.set(new_known_txs)
409 if self.remembered_txs_size >= self.max_remembered_txs_size:
410 raise PeerMisbehavingError('too much transaction data stored')
# 'forget_tx': peer releases previously remembered transactions.
411 message_forget_tx = pack.ComposedType([
412 ('tx_hashes', pack.ListType(pack.IntType(256))),
# KeyError here (hash never remembered) would propagate to packetReceived.
414 def handle_forget_tx(self, tx_hashes):
415 for tx_hash in tx_hashes:
416 self.remembered_txs_size -= 100 + bitcoin_data.tx_type.packed_size(self.remembered_txs[tx_hash])
417 assert self.remembered_txs_size >= 0
418 del self.remembered_txs[tx_hash]
# Tear down per-connection state: fire the lost event (which unhooks the
# known_txs/mining_txs watchers), cancel timers, stop the periodic threads,
# notify the factory, and fail any pending get_shares requests.
# NOTE(review): the _stop_thread()/_stop_thread2() calls and the connected2
# guard lines are elided from this excerpt.
421 def connectionLost(self, reason):
422 self.connection_lost_event.happened()
423 if self.timeout_delayed is not None:
424 self.timeout_delayed.cancel()
426 self.factory.proto_disconnected(self, reason)
428 if self.node.advertise_ip:
430 self.connected2 = False
431 self.factory.proto_lost_connection(self, reason)
433 print "Peer connection lost:", self.addr, reason
# Reject all outstanding sharereq deferreds with the disconnect reason.
434 self.get_shares.respond_all(reason)
# do_ping (def line, original 437, elided): measures round-trip latency in
# seconds by issuing a dummy sharereq and timing the reply.
436 @defer.inlineCallbacks
438 start = reactor.seconds()
439 yield self.get_shares(hashes=[0], parents=0, stops=[])
440 end = reactor.seconds()
441 defer.returnValue(end - start)
# ServerFactory: accepts incoming peer connections, enforcing a global
# connection cap, a per-/24-ident cap of 3, and active bans.
# NOTE(review): various lines are elided in this excerpt (self.node/self.conns
# init, the `return None` rejection branches, _host_to_ident's return, and the
# start()/stop() def lines and running-flag bookkeeping).
443 class ServerFactory(protocol.ServerFactory):
444 def __init__(self, node, max_conns):
446 self.max_conns = max_conns
450 self.listen_port = None
# Return None to make Twisted drop the connection (cap reached or banned).
452 def buildProtocol(self, addr):
453 if sum(self.conns.itervalues()) >= self.max_conns or self.conns.get(self._host_to_ident(addr.host), 0) >= 3:
455 if addr.host in self.node.bans and self.node.bans[addr.host] > time.time():
# True = incoming connection.
457 p = Protocol(self.node, True)
460 print "Got peer connection from:", addr
# Collapse an IPv4 host into a coarser identity (return line elided;
# presumably groups by network prefix -- confirm against full source).
463 def _host_to_ident(self, host):
464 a, b, c, d = host.split('.')
# Reference-count connections per ident.
467 def proto_made_connection(self, proto):
468 ident = self._host_to_ident(proto.transport.getPeer().host)
469 self.conns[ident] = self.conns.get(ident, 0) + 1
470 def proto_lost_connection(self, proto, reason):
471 ident = self._host_to_ident(proto.transport.getPeer().host)
472 self.conns[ident] -= 1
473 if not self.conns[ident]:
474 del self.conns[ident]
476 def proto_connected(self, proto):
477 self.node.got_conn(proto)
478 def proto_disconnected(self, proto, reason):
479 self.node.lost_conn(proto, reason)
# start(): bind the listening TCP port, retrying on failure.
482 assert not self.running
485 def attempt_listen():
487 self.listen_port = reactor.listenTCP(self.node.port, self)
488 deferral.retry('Error binding to P2P port:', traceback=False)(attempt_listen)()
# stop(): returns the deferred from stopListening for callers to wait on.
494 return self.listen_port.stopListening()
# ClientFactory: maintains outgoing connections, periodically dialing good
# peers until desired_conns is reached, with at most max_attempts in flight.
# NOTE(review): elided lines include self.node/self.conns init, buildProtocol's
# factory assignment and return, the _think def line, and the
# start()/stop()/running bookkeeping.
496 class ClientFactory(protocol.ClientFactory):
497 def __init__(self, node, desired_conns, max_attempts):
499 self.desired_conns = desired_conns
500 self.max_attempts = max_attempts
# Idents (per _host_to_ident) with a connection attempt in flight.
502 self.attempts = set()
# Same host-to-identity collapsing as ServerFactory (return line elided).
506 def _host_to_ident(self, host):
507 a, b, c, d = host.split('.')
# False = outgoing connection.
510 def buildProtocol(self, addr):
511 p = Protocol(self.node, False)
# Track the attempt; duplicate attempts indicate a logic error.
515 def startedConnecting(self, connector):
516 ident = self._host_to_ident(connector.getDestination().host)
517 if ident in self.attempts:
518 raise AssertionError('already have attempt')
519 self.attempts.add(ident)
# Either way the connection ends, the attempt slot is released.
521 def clientConnectionFailed(self, connector, reason):
522 self.attempts.remove(self._host_to_ident(connector.getDestination().host))
524 def clientConnectionLost(self, connector, reason):
525 self.attempts.remove(self._host_to_ident(connector.getDestination().host))
527 def proto_made_connection(self, proto):
529 def proto_lost_connection(self, proto, reason):
532 def proto_connected(self, proto):
533 self.conns.add(proto)
534 self.node.got_conn(proto)
535 def proto_disconnected(self, proto, reason):
536 self.conns.remove(proto)
537 self.node.lost_conn(proto, reason)
# start(): kick off the periodic _think loop.
540 assert not self.running
542 self._stop_thinking = deferral.run_repeatedly(self._think)
546 self._stop_thinking()
# _think(): if below the desired connection count, pick one good peer address
# and dial it, skipping already-attempted idents and banned hosts.
550 if len(self.conns) < self.desired_conns and len(self.attempts) < self.max_attempts and self.node.addr_store:
551 (host, port), = self.node.get_good_peers(1)
553 if self._host_to_ident(host) in self.attempts:
555 elif host in self.node.bans and self.node.bans[host] > time.time():
558 #print 'Trying to connect to', host, port
559 reactor.connectTCP(host, port, self, timeout=5)
# Re-run after an exponentially distributed delay with mean 1 second.
563 return random.expovariate(1/1)
# SingleClientFactory: persistent outgoing connection to one explicitly
# configured address (Node.connect_addrs); Twisted's ReconnectingClientFactory
# handles automatic reconnection with backoff.
# NOTE(review): __init__ body, buildProtocol's return, and the resetDelay /
# no-op lines are elided from this excerpt.
565 class SingleClientFactory(protocol.ReconnectingClientFactory):
566 def __init__(self, node):
569 def buildProtocol(self, addr):
570 p = Protocol(self.node, incoming=False)
574 def proto_made_connection(self, proto):
576 def proto_lost_connection(self, proto, reason):
579 def proto_connected(self, proto):
581 self.node.got_conn(proto)
582 def proto_disconnected(self, proto, reason):
583 self.node.lost_conn(proto, reason)
# Node.__init__ (enclosing class header is elided from this excerpt): central
# coordination object owning the client/server factories, the address store,
# bans, and the shared known/mining transaction Variables.
# NOTE(review): addr_store={} and variable.Variable({}) are mutable default
# arguments. addr_store is defensively copied (dict(addr_store)) so it is
# safe; the shared default Variable instances would be shared across Nodes
# that omit those arguments -- confirm intent against the full source.
586 def __init__(self, best_share_hash_func, port, net, addr_store={}, connect_addrs=set(), desired_outgoing_conns=10, max_outgoing_attempts=30, max_incoming_conns=50, preferred_storage=1000, known_txs_var=variable.Variable({}), mining_txs_var=variable.Variable({}), advertise_ip=True):
587 self.best_share_hash_func = best_share_hash_func
# Copy so the caller's dict is never mutated.
590 self.addr_store = dict(addr_store)
591 self.connect_addrs = connect_addrs
592 self.preferred_storage = preferred_storage
593 self.known_txs_var = known_txs_var
594 self.mining_txs_var = mining_txs_var
595 self.advertise_ip = advertise_ip
597 self.traffic_happened = variable.Event()
# Random 64-bit identity for self/duplicate connection detection.
598 self.nonce = random.randrange(2**64)
600 self.bans = {} # address -> end_time
601 self.clientfactory = ClientFactory(self, desired_outgoing_conns, max_outgoing_attempts)
602 self.serverfactory = ServerFactory(self, max_incoming_conns)
# start() (def line elided): bring up both factories, dial the explicitly
# configured addresses, and begin the periodic _think loop.
607 raise ValueError('already running')
609 self.clientfactory.start()
610 self.serverfactory.start()
# One persistent reconnecting connector per configured (addr, port).
611 self.singleclientconnectors = [reactor.connectTCP(addr, port, SingleClientFactory(self)) for addr, port in self.connect_addrs]
615 self._stop_thinking = deferral.run_repeatedly(self._think)
# _think() (def line elided): top up the address store by asking a random
# peer for more addresses while below preferred_storage.
619 if len(self.addr_store) < self.preferred_storage and self.peers:
620 random.choice(self.peers.values()).send_getaddrs(count=8)
# Re-run after an exponentially distributed delay with mean 20 seconds.
624 return random.expovariate(1/20)
# stop() (def line elided): inlineCallbacks coroutine that tears everything
# down in order and waits on each deferred.
626 @defer.inlineCallbacks
629 raise ValueError('already stopped')
633 self._stop_thinking()
634 yield self.clientfactory.stop()
635 yield self.serverfactory.stop()
636 for singleclientconnector in self.singleclientconnectors:
# stopTrying() first so the factory doesn't immediately reconnect.
637 yield singleclientconnector.factory.stopTrying()
638 yield singleclientconnector.disconnect()
639 del self.singleclientconnectors
# Register a fully handshaken peer, keyed by its nonce.
641 def got_conn(self, conn):
642 if conn.nonce in self.peers:
643 raise ValueError('already have peer')
644 self.peers[conn.nonce] = conn
646 print '%s connection to peer %s:%i established. p2pool version: %i %r' % ('Incoming' if conn.incoming else 'Outgoing', conn.addr[0], conn.addr[1], conn.other_version, conn.other_sub_version)
# Deregister a peer; sanity-check it is the exact registered connection.
648 def lost_conn(self, conn, reason):
649 if conn.nonce not in self.peers:
650 raise ValueError('''don't have peer''')
651 if conn is not self.peers[conn.nonce]:
652 raise ValueError('wrong conn')
653 del self.peers[conn.nonce]
655 print 'Lost peer %s:%i - %s' % (conn.addr[0], conn.addr[1], reason.getErrorMessage())
# Record/refresh a peer address. Store value is (services, first_seen,
# last_seen); existing entries keep their first_seen and advance last_seen.
658 def got_addr(self, (host, port), services, timestamp):
659 if (host, port) in self.addr_store:
660 old_services, old_first_seen, old_last_seen = self.addr_store[host, port]
661 self.addr_store[host, port] = services, old_first_seen, max(old_last_seen, timestamp)
# Cap the address store at 10000 entries; new addresses beyond that are
# silently dropped.
663 if len(self.addr_store) < 10000:
664 self.addr_store[host, port] = services, timestamp, timestamp
# Default debug-print handlers; subclasses/users of Node are expected to
# override these with real share/block processing (Protocol calls them via
# self.node.handle_*).
666 def handle_shares(self, shares, peer):
667 print 'handle_shares', (shares, peer)
669 def handle_share_hashes(self, hashes, peer):
670 print 'handle_share_hashes', (hashes, peer)
672 def handle_get_shares(self, hashes, parents, stops, peer):
673 print 'handle_get_shares', (hashes, parents, stops, peer)
675 def handle_bestblock(self, header, peer):
676 print 'handle_bestblock', header
678 def get_good_peers(self, max_count):
680 return [x[0] for x in sorted(self.addr_store.iteritems(), key=lambda (k, (services, first_seen, last_seen)):
681 -math.log(max(3600, last_seen - first_seen))/math.log(max(3600, t - last_seen))*random.expovariate(1)