1 from __future__ import division
7 from twisted.internet import defer, protocol, reactor
8 from twisted.python import failure, log
11 from p2pool import data as p2pool_data
12 from p2pool.bitcoin import data as bitcoin_data
13 from p2pool.util import deferral, p2protocol, pack, variable
15 class PeerMisbehavingError(Exception):
# Send helper: when a packet exceeds the wire-format size limit, split every
# list-valued keyword argument in half and send each half separately.
def fragment(f, **kwargs):
    # NOTE(review): the `try:` and the initial send call are elided from this
    # listing; only the TooLong fallback is visible here. `att` is defined
    # outside this view — presumably a retry/recurse wrapper; confirm.
    except p2protocol.TooLong:
        # Retry with the first half of each list, then the second half.
        att(f, **dict((k, v[:len(v)//2]) for k, v in kwargs.iteritems()))
        return att(f, **dict((k, v[len(v)//2:]) for k, v in kwargs.iteritems()))
class Protocol(p2protocol.Protocol):
    """One p2pool peer connection (Python 2 / Twisted framed protocol)."""
    
    # Budget, in packed bytes, for transactions remembered on behalf of the
    # remote peer (mirrored for our view of what the remote remembers for us).
    max_remembered_txs_size = 2500000
    
    def __init__(self, node, incoming):
        # 1000000 is the maximum payload length accepted by the framing layer.
        p2protocol.Protocol.__init__(self, node.net.PREFIX, 1000000, node.traffic_happened)
        self.incoming = incoming  # True when the peer connected to us
        # Both filled in by handle_version during the handshake.
        self.other_version = None
        self.connected2 = False  # True once the version handshake completed
    def connectionMade(self):
        """Twisted callback: start the p2pool handshake on a new connection."""
        p2protocol.Protocol.connectionMade(self)
        self.factory.proto_made_connection(self)
        self.connection_lost_event = variable.Event()
        self.addr = self.transport.getPeer().host, self.transport.getPeer().port
        # NOTE(review): the send_version(...) call that wraps the keyword
        # arguments below is elided from this listing.
        address=self.transport.getPeer().host,
        port=self.transport.getPeer().port,
        address=self.transport.getHost().host,
        port=self.transport.getHost().port,
        nonce=self.node.nonce,  # random per-node id, used to detect self/duplicate connects
        sub_version=p2pool.__version__,
        best_share_hash=self.node.best_share_hash_func(),
        # Abort the connection if the handshake does not finish within 10s.
        self.timeout_delayed = reactor.callLater(10, self._connect_timeout)
        # Request/response matcher for share requests; drops the connection
        # if a reply never arrives.
        self.get_shares = deferral.GenericDeferrer(
            func=lambda id, hashes, parents, stops: self.send_sharereq(id=id, hashes=hashes, parents=parents, stops=stops),
            on_timeout=self.transport.loseConnection,
        self.remote_tx_hashes = set() # view of peer's known_txs # not actually initially empty, but sending txs instead of tx hashes won't hurt
        self.remote_remembered_txs_size = 0
        self.remembered_txs = {} # view of peer's mining_txs
        self.remembered_txs_size = 0
80 def _connect_timeout(self):
81 self.timeout_delayed = None
82 print 'Handshake timed out, disconnecting from %s:%i' % self.addr
83 self.transport.loseConnection()
    def packetReceived(self, command, payload2):
        """Dispatch one framed packet; drop-and-ban peers that misbehave."""
        # NOTE(review): the opening `try:` of this handler is elided from
        # this listing.
            # The very first message on a connection must be the handshake.
            if command != 'version' and not self.connected2:
                raise PeerMisbehavingError('first message was not version message')
            p2protocol.Protocol.packetReceived(self, command, payload2)
        except PeerMisbehavingError, e:
            print 'Peer %s:%i misbehaving, will drop and ban. Reason:' % self.addr, e.message
            self.badPeerHappened()
    def badPeerHappened(self):
        """Disconnect a misbehaving peer and ban its host for one hour."""
        # NOTE(review): one line is elided between the def and the print
        # (possibly a debug guard) — confirm against the full source.
        print "Bad peer banned:", self.addr
        self.transport.loseConnection()
        # Ban expiry is tracked on the node, keyed by bare host address.
        self.node.bans[self.transport.getPeer().host] = time.time() + 60*60
        # NOTE(review): the `def` header for this body is elided from this
        # listing; judging by its message, this is the post-handshake idle
        # timeout handler (companion of _connect_timeout).
        self.timeout_delayed = None
        print 'Connection timed out, disconnecting from %s:%i' % self.addr
        self.transport.loseConnection()
    # Wire schema of the `version` handshake message.
    # NOTE(review): the closing `])` of this literal is elided from this listing.
    message_version = pack.ComposedType([
        ('version', pack.IntType(32)),
        ('services', pack.IntType(64)),
        ('addr_to', bitcoin_data.address_type),
        ('addr_from', bitcoin_data.address_type),
        ('nonce', pack.IntType(64)),  # random node id, for self/duplicate detection
        ('sub_version', pack.VarStrType()),
        ('mode', pack.IntType(32)), # always 1 for legacy compatibility
        ('best_share_hash', pack.PossiblyNoneType(0, pack.IntType(256))),
    def handle_version(self, version, services, addr_to, addr_from, nonce, sub_version, mode, best_share_hash):
        """Complete the handshake, then wire up periodic work and tx-sync watchers.

        Raises PeerMisbehavingError for duplicate version messages, obsolete
        versions, or self-connections (caller bans the peer).
        NOTE(review): several lines of this method are elided from this listing
        (e.g. the version-check `if` guarding 'peer too old').
        """
        if self.other_version is not None:
            raise PeerMisbehavingError('more than one version message')
        # NOTE(review): the `if version < ...:` guard for this raise is elided.
        raise PeerMisbehavingError('peer too old')
        
        self.other_version = version
        self.other_sub_version = sub_version[:512]  # cap attacker-controlled string
        self.other_services = services
        
        if nonce == self.node.nonce:
            raise PeerMisbehavingError('was connected to self')
        # Same nonce already registered: this is a second connection to the
        # same node, so drop this one.
        if nonce in self.node.peers:
            print 'Detected duplicate connection, disconnecting from %s:%i' % self.addr
            self.transport.loseConnection()
        
        self.connected2 = True
        
        # Swap the handshake watchdog for the idle timeout; any received data
        # (see new_dataReceived below) pushes the deadline back.
        self.timeout_delayed.cancel()
        self.timeout_delayed = reactor.callLater(100, self._timeout)
        
        old_dataReceived = self.dataReceived
        def new_dataReceived(data):
            if self.timeout_delayed is not None:
                self.timeout_delayed.reset(100)
            old_dataReceived(data)
        self.dataReceived = new_dataReceived
        
        self.factory.proto_connected(self)
        
        # Periodic keepalive work, at exponentially-distributed intervals.
        self._stop_thread = deferral.run_repeatedly(lambda: [
            random.expovariate(1/100)][-1])
        
        # Periodically advertise our own address; mean interval grows with
        # the number of peers so total gossip traffic stays bounded.
        self._stop_thread2 = deferral.run_repeatedly(lambda: [
            self.send_addrme(port=self.node.port),
            random.expovariate(1/(100*len(self.node.peers) + 1))][-1])
        
        if best_share_hash is not None:
            self.node.handle_share_hashes([best_share_hash], self)
        
        # Mirror changes of our known-txs set to the peer via have_tx/losing_tx.
        def update_remote_view_of_my_known_txs(before, after):
            added = set(after) - set(before)
            removed = set(before) - set(after)
            self.send_have_tx(tx_hashes=list(added))
            self.send_losing_tx(tx_hashes=list(removed))
        watch_id = self.node.known_txs_var.transitioned.watch(update_remote_view_of_my_known_txs)
        self.connection_lost_event.watch(lambda: self.node.known_txs_var.transitioned.unwatch(watch_id))
        
        self.send_have_tx(tx_hashes=self.node.known_txs_var.value.keys())
        
        # Mirror changes of our mining-txs set via remember_tx/forget_tx,
        # keeping a running total of the bytes the peer stores for us.
        def update_remote_view_of_my_mining_txs(before, after):
            added = set(after) - set(before)
            removed = set(before) - set(after)
            self.remote_remembered_txs_size += sum(len(bitcoin_data.tx_type.pack(after[x])) for x in added)
            assert self.remote_remembered_txs_size <= self.max_remembered_txs_size
            # Send only hashes for txs the peer already knows; full txs otherwise.
            fragment(self.send_remember_tx, tx_hashes=[x for x in added if x in self.remote_tx_hashes], txs=[after[x] for x in added if x not in self.remote_tx_hashes])
            self.send_forget_tx(tx_hashes=removed)
            self.remote_remembered_txs_size -= sum(len(bitcoin_data.tx_type.pack(before[x])) for x in removed)
        watch_id2 = self.node.mining_txs_var.transitioned.watch(update_remote_view_of_my_mining_txs)
        self.connection_lost_event.watch(lambda: self.node.mining_txs_var.transitioned.unwatch(watch_id2))
        
        # Seed the peer with our current mining txs (always full txs here).
        self.remote_remembered_txs_size += sum(len(bitcoin_data.tx_type.pack(x)) for x in self.node.mining_txs_var.value.values())
        assert self.remote_remembered_txs_size <= self.max_remembered_txs_size
        fragment(self.send_remember_tx, tx_hashes=[], txs=self.node.mining_txs_var.value.values())
    # `ping` carries no payload.
    message_ping = pack.ComposedType([])
    def handle_ping(self):
        # NOTE(review): body elided from this listing.
    # `addrme`: the peer asks us to gossip its address at the given port.
    message_addrme = pack.ComposedType([
        ('port', pack.IntType(16)),
    def handle_addrme(self, port):
        """Record/relay the peer's advertised listening address."""
        host = self.transport.getPeer().host
        #print 'addrme from', host, port
        if host == '127.0.0.1':
            # Local connection: we can't learn a useful public address, so
            # just forward the addrme to one random peer.
            if random.random() < .8 and self.node.peers:
                random.choice(self.node.peers.values()).send_addrme(port=port) # services...
        # NOTE(review): the `else:` introducing the branch below is elided.
            self.node.got_addr((self.transport.getPeer().host, port), self.other_services, int(time.time()))
            # Gossip: with 80% probability relay the address to one random peer.
            if random.random() < .8 and self.node.peers:
                random.choice(self.node.peers.values()).send_addrs(addrs=[
                    services=self.other_services,
                    timestamp=int(time.time()),
    # `addrs`: gossiped peer addresses with advertised timestamps.
    # NOTE(review): closing brackets of this literal are elided from this listing.
    message_addrs = pack.ComposedType([
        ('addrs', pack.ListType(pack.ComposedType([
            ('timestamp', pack.IntType(64)),
            ('address', bitcoin_data.address_type),
    def handle_addrs(self, addrs):
        """Record each gossiped address and probabilistically re-gossip it."""
        for addr_record in addrs:
            # Clamp the advertised timestamp to now so peers cannot post-date entries.
            self.node.got_addr((addr_record['address']['address'], addr_record['address']['port']), addr_record['address']['services'], min(int(time.time()), addr_record['timestamp']))
            # With 80% probability, relay the record to one random peer.
            if random.random() < .8 and self.node.peers:
                random.choice(self.node.peers.values()).send_addrs(addrs=[addr_record])
    # `getaddrs`: the peer requests up to `count` known addresses.
    message_getaddrs = pack.ComposedType([
        ('count', pack.IntType(32)),
    def handle_getaddrs(self, count):
        """Reply with good known addresses from the node's addr_store.

        NOTE(review): several lines of this handler (count clamping, the
        response-record construction) are elided from this listing.
        """
        self.send_addrs(addrs=[
            timestamp=int(self.node.addr_store[host, port][2]),  # last-seen time
            services=self.node.addr_store[host, port][0],
            self.node.get_good_peers(count)
    # `shares`: unsolicited share broadcast from a peer.
    # NOTE(review): the closing `])` of this literal is elided from this listing.
    message_shares = pack.ComposedType([
        ('shares', pack.ListType(p2pool_data.share_type)),
249 def handle_shares(self, shares):
250 self.node.handle_shares([p2pool_data.load_share(share, self.node.net, self) for share in shares if share['type'] not in [6, 7]], self)
    def sendShares(self, shares, tracker, known_txs, include_txs_with=[]):
        """Send shares to this peer, first transferring any txs they reference.

        NOTE(review): `include_txs_with=[]` is a mutable default argument —
        harmless only while no caller mutates it; worth fixing.
        NOTE(review): several lines of this method (the empty-shares guard,
        the tx_hashes accumulator setup, the deferred chaining) are elided
        from this listing.
        """
            return defer.succeed(None)  # nothing to send
            if share.hash in include_txs_with:
                tx_hashes.update(share.get_other_tx_hashes(tracker))
            
            # Only txs the peer cannot already derive: not in our mining set
            # but present in known_txs.
            hashes_to_send = [x for x in tx_hashes if x not in self.node.mining_txs_var.value and x in known_txs]
            
            # Enforce the remembered-txs byte budget before sending anything.
            new_remote_remembered_txs_size = self.remote_remembered_txs_size + sum(len(bitcoin_data.tx_type.pack(known_txs[x])) for x in hashes_to_send)
            if new_remote_remembered_txs_size > self.max_remembered_txs_size:
                raise ValueError('shares have too many txs')
            self.remote_remembered_txs_size = new_remote_remembered_txs_size
            
            # remember_tx carries bare hashes for txs the peer knows, full txs otherwise.
            fragment(self.send_remember_tx, tx_hashes=[x for x in hashes_to_send if x in self.remote_tx_hashes], txs=[known_txs[x] for x in hashes_to_send if x not in self.remote_tx_hashes])
            fragment(self.send_shares, shares=[share.as_share() for share in shares])
            res = self.send_forget_tx(tx_hashes=hashes_to_send)
            self.remote_remembered_txs_size -= sum(len(bitcoin_data.tx_type.pack(known_txs[x])) for x in hashes_to_send)
    # `sharereq`: request for shares by hash, with ancestry parameters.
    message_sharereq = pack.ComposedType([
        ('id', pack.IntType(256)),  # request id, echoed back in sharereply
        ('hashes', pack.ListType(pack.IntType(256))),
        ('parents', pack.VarIntType()),
        ('stops', pack.ListType(pack.IntType(256))),
    def handle_sharereq(self, id, hashes, parents, stops):
        """Serve a share request; reply 'too long' when it exceeds packet size."""
        shares = self.node.handle_get_shares(hashes, parents, stops, self)
        # NOTE(review): the `try:` around the success reply is elided from
        # this listing.
            self.send_sharereply(id=id, result='good', shares=[share.as_share() for share in shares])
        except p2protocol.TooLong:
            self.send_sharereply(id=id, result='too long', shares=[])
    # `sharereply`: response to a sharereq, matched back by id.
    message_sharereply = pack.ComposedType([
        ('id', pack.IntType(256)),
        ('result', pack.EnumType(pack.VarIntType(), {0: 'good', 1: 'too long', 2: 'unk2', 3: 'unk3', 4: 'unk4', 5: 'unk5', 6: 'unk6'})),
        ('shares', pack.ListType(p2pool_data.share_type)),
    def handle_sharereply(self, id, result, shares):
        """Resolve the pending get_shares deferred for this request id."""
        # NOTE(review): the `if result == 'good':` / `else:` branch headers
        # around the two assignments below are elided from this listing.
            res = [p2pool_data.load_share(share, self.node.net, self) for share in shares if share['type'] not in [6, 7]]
            # Non-good results surface to the requester as a Failure.
            res = failure.Failure("sharereply result: " + result)
        self.get_shares.got_response(id, res)
    # `bestblock`: the peer's best known bitcoin block header.
    # NOTE(review): the closing `])` of this literal is elided from this listing.
    message_bestblock = pack.ComposedType([
        ('header', bitcoin_data.block_header_type),
    def handle_bestblock(self, header):
        """Forward the peer's best-block announcement to the node."""
        self.node.handle_bestblock(header, self)
310 message_have_tx = pack.ComposedType([
311 ('tx_hashes', pack.ListType(pack.IntType(256))),
313 def handle_have_tx(self, tx_hashes):
314 self.remote_tx_hashes.update(tx_hashes)
315 message_losing_tx = pack.ComposedType([
316 ('tx_hashes', pack.ListType(pack.IntType(256))),
318 def handle_losing_tx(self, tx_hashes):
319 self.remote_tx_hashes.difference_update(tx_hashes)
    # `remember_tx`: the peer asks us to hold txs (by hash or in full) so
    # that later shares can reference them compactly.
    message_remember_tx = pack.ComposedType([
        ('tx_hashes', pack.ListType(pack.IntType(256))),
        ('txs', pack.ListType(bitcoin_data.tx_type)),
    def handle_remember_tx(self, tx_hashes, txs):
        """Store txs on the peer's behalf, enforcing the byte budget."""
        for tx_hash in tx_hashes:
            if tx_hash not in self.remembered_txs:
                # Hash-only reference: the tx must already be in the node's
                # known set (KeyError here would indicate a protocol violation).
                self.remembered_txs[tx_hash] = self.node.known_txs_var.value[tx_hash]
                self.remembered_txs_size += len(bitcoin_data.tx_type.pack(self.node.known_txs_var.value[tx_hash]))
        new_known_txs = dict(self.node.known_txs_var.value)
        # NOTE(review): the `for tx in txs:` header for the loop body below
        # is elided from this listing.
            tx_hash = bitcoin_data.hash256(bitcoin_data.tx_type.pack(tx))
            if tx_hash not in self.remembered_txs:
                self.remembered_txs[tx_hash] = tx
                self.remembered_txs_size += len(bitcoin_data.tx_type.pack(tx))
            # Full txs also become globally known.
            new_known_txs[tx_hash] = tx
        self.node.known_txs_var.set(new_known_txs)
        # Exceeding the storage budget is a bannable protocol violation.
        if self.remembered_txs_size >= self.max_remembered_txs_size:
            raise PeerMisbehavingError('too much transaction data stored')
341 message_forget_tx = pack.ComposedType([
342 ('tx_hashes', pack.ListType(pack.IntType(256))),
344 def handle_forget_tx(self, tx_hashes):
345 for tx_hash in tx_hashes:
346 self.remembered_txs_size -= len(bitcoin_data.tx_type.pack(self.remembered_txs[tx_hash]))
347 assert self.remembered_txs_size >= 0
348 del self.remembered_txs[tx_hash]
    def connectionLost(self, reason):
        """Twisted callback: tear down per-connection state."""
        # Fires the watchers registered in handle_version (tx-sync unwatch).
        self.connection_lost_event.happened()
        if self.timeout_delayed is not None:
            self.timeout_delayed.cancel()
        # NOTE(review): guards around the lines below (e.g. only-if-handshaked)
        # appear elided from this listing — confirm against the full source.
        self.factory.proto_disconnected(self, reason)
        self.connected2 = False
        self.factory.proto_lost_connection(self, reason)
        print "Peer connection lost:", self.addr, reason
        # Fail any outstanding share requests so callers are not left hanging.
        self.get_shares.respond_all(reason)
    @defer.inlineCallbacks
    # NOTE(review): the `def` line of this coroutine is elided from this
    # listing; it measures a request round-trip time via get_shares.
        start = reactor.seconds()
        yield self.get_shares(hashes=[0], parents=0, stops=[])
        end = reactor.seconds()
        defer.returnValue(end - start)
class ServerFactory(protocol.ServerFactory):
    """Accepts incoming peer connections, enforcing global and per-ident limits."""
    def __init__(self, node, max_conns):
        # NOTE(review): other attribute initializations (node, conns, running,
        # listen_port) are elided from this listing.
        self.max_conns = max_conns  # total simultaneous incoming connections
    def buildProtocol(self, addr):
        """Refuse over-limit or banned hosts; otherwise build an incoming Protocol.

        NOTE(review): the refusal bodies (returning None) and the tail of this
        method are elided from this listing.
        """
        # Global cap, plus at most 3 simultaneous connections per ident bucket.
        if sum(self.conns.itervalues()) >= self.max_conns or self.conns.get(self._host_to_ident(addr.host), 0) >= 3:
        # Still-active ban on this host?
        if addr.host in self.node.bans and self.node.bans[addr.host] > time.time():
        p = Protocol(self.node, True)
        print "Got peer connection from:", addr
    def _host_to_ident(self, host):
        """Map a dotted-quad host to a connection-counting bucket key.

        NOTE(review): the return statement is elided from this listing; the
        split suggests it keeps an address prefix (subnet bucket) — confirm.
        """
        a, b, c, d = host.split('.')
395 def proto_made_connection(self, proto):
396 ident = self._host_to_ident(proto.transport.getPeer().host)
397 self.conns[ident] = self.conns.get(ident, 0) + 1
398 def proto_lost_connection(self, proto, reason):
399 ident = self._host_to_ident(proto.transport.getPeer().host)
400 self.conns[ident] -= 1
401 if not self.conns[ident]:
402 del self.conns[ident]
    def proto_connected(self, proto):
        """Handshake finished: hand the peer to the node."""
        self.node.got_conn(proto)
    def proto_disconnected(self, proto, reason):
        """A handshaked peer went away: notify the node."""
        self.node.lost_conn(proto, reason)
        # NOTE(review): the `def start(self):` / `def stop(self):` headers and
        # surrounding lines for these bodies are elided from this listing.
        assert not self.running
        def attempt_listen():
            self.listen_port = reactor.listenTCP(self.node.port, self)
        # Keep retrying the bind (e.g. port still in TIME_WAIT) until it succeeds.
        deferral.retry('Error binding to P2P port:', traceback=False)(attempt_listen)()
        return self.listen_port.stopListening()
class ClientFactory(protocol.ClientFactory):
    """Dials outgoing peer connections, aiming for desired_conns established
    with at most max_attempts connects in flight."""
    def __init__(self, node, desired_conns, max_attempts):
        # NOTE(review): other attribute initializations (node, conns, running)
        # are elided from this listing.
        self.desired_conns = desired_conns
        self.max_attempts = max_attempts
        self.attempts = set()  # idents with a connect currently in flight
    def _host_to_ident(self, host):
        """Map a dotted-quad host to an attempt-tracking bucket key.

        NOTE(review): the return statement is elided from this listing; the
        split suggests it keeps an address prefix (subnet bucket) — confirm.
        """
        a, b, c, d = host.split('.')
    def buildProtocol(self, addr):
        """Build an outgoing Protocol (incoming=False)."""
        p = Protocol(self.node, False)
        # NOTE(review): the remaining lines of this method are elided from
        # this listing.
443 def startedConnecting(self, connector):
444 ident = self._host_to_ident(connector.getDestination().host)
445 if ident in self.attempts:
446 raise AssertionError('already have attempt')
447 self.attempts.add(ident)
449 def clientConnectionFailed(self, connector, reason):
450 self.attempts.remove(self._host_to_ident(connector.getDestination().host))
452 def clientConnectionLost(self, connector, reason):
453 self.attempts.remove(self._host_to_ident(connector.getDestination().host))
    def proto_made_connection(self, proto):
        # NOTE(review): body elided from this listing.
    def proto_lost_connection(self, proto, reason):
        # NOTE(review): body elided from this listing.
    def proto_connected(self, proto):
        """Track the fully-handshaked peer and notify the node."""
        self.conns.add(proto)
        self.node.got_conn(proto)
    def proto_disconnected(self, proto, reason):
        """Untrack the peer and notify the node."""
        self.conns.remove(proto)
        self.node.lost_conn(proto, reason)
        # NOTE(review): the `def start(self):` / `def stop(self):` headers and
        # surrounding lines for these bodies are elided from this listing.
        assert not self.running
        self._stop_thinking = deferral.run_repeatedly(self._think)
        self._stop_thinking()
        # NOTE(review): the `def _think(self):` header and several branch
        # bodies of this periodic connector are elided from this listing.
        # Dial a new peer only while under both the connection and attempt caps.
        if len(self.conns) < self.desired_conns and len(self.attempts) < self.max_attempts and self.node.addr_store:
            (host, port), = self.node.get_good_peers(1)
            
            if self._host_to_ident(host) in self.attempts:
            elif host in self.node.bans and self.node.bans[host] > time.time():
                #print 'Trying to connect to', host, port
                reactor.connectTCP(host, port, self, timeout=5)
        
        # Re-run after an exponentially-distributed delay (mean 1 second).
        return random.expovariate(1/1)
class SingleClientFactory(protocol.ReconnectingClientFactory):
    """Maintains one outgoing connection to a fixed address, reconnecting on
    loss (used by Node.start for each entry of connect_addrs)."""
    # NOTE(review): several method bodies below are elided from this listing.
    def __init__(self, node):
    def buildProtocol(self, addr):
        p = Protocol(self.node, incoming=False)
    def proto_made_connection(self, proto):
    def proto_lost_connection(self, proto, reason):
    def proto_connected(self, proto):
        self.node.got_conn(proto)
    def proto_disconnected(self, proto, reason):
        self.node.lost_conn(proto, reason)
    # NOTE(review): the enclosing class header for this method is above the
    # visible portion of this listing.
    def __init__(self, best_share_hash_func, port, net, addr_store={}, connect_addrs=set(), desired_outgoing_conns=10, max_outgoing_attempts=30, max_incoming_conns=50, preferred_storage=1000, traffic_happened=variable.Event(), known_txs_var=variable.Variable({}), mining_txs_var=variable.Variable({})):
        # NOTE(review): the constructed default arguments (addr_store={},
        # connect_addrs=set(), traffic_happened=variable.Event(), the two
        # Variable({}) defaults) are each created once at definition time and
        # shared by every instance that relies on them; addr_store is copied
        # below, the others are stored as-is — confirm callers always pass
        # their own, or fix.
        self.best_share_hash_func = best_share_hash_func
        self.addr_store = dict(addr_store)  # defensive copy of the default/shared dict
        self.connect_addrs = connect_addrs
        self.preferred_storage = preferred_storage  # target addr_store size (see _think)
        self.traffic_happened = traffic_happened
        self.known_txs_var = known_txs_var
        self.mining_txs_var = mining_txs_var
        # Random 64-bit id sent in version messages to detect self/duplicate connects.
        self.nonce = random.randrange(2**64)
        self.bans = {} # address -> end_time
        self.clientfactory = ClientFactory(self, desired_outgoing_conns, max_outgoing_attempts)
        self.serverfactory = ServerFactory(self, max_incoming_conns)
        # NOTE(review): the `def start(self):` header and its running-state
        # check are elided from this listing.
        raise ValueError('already running')
        self.clientfactory.start()
        self.serverfactory.start()
        # One persistent reconnecting client per explicitly-configured address.
        self.singleclientconnectors = [reactor.connectTCP(addr, port, SingleClientFactory(self)) for addr, port in self.connect_addrs]
        self._stop_thinking = deferral.run_repeatedly(self._think)
        # NOTE(review): the `def _think(self):` header for this body is elided
        # from this listing. Periodically tops up the address store by asking
        # a random peer for more addresses.
        if len(self.addr_store) < self.preferred_storage and self.peers:
            random.choice(self.peers.values()).send_getaddrs(count=8)
        # Re-run after an exponentially-distributed delay (mean 20 seconds).
        return random.expovariate(1/20)
    @defer.inlineCallbacks
    # NOTE(review): the `def stop(self):` header and its running-state check
    # are elided from this listing.
        raise ValueError('already stopped')
        self._stop_thinking()
        yield self.clientfactory.stop()
        yield self.serverfactory.stop()
        # Shut down each fixed-address connector: stop its reconnect loop,
        # then drop the live connection.
        for singleclientconnector in self.singleclientconnectors:
            yield singleclientconnector.factory.stopTrying()
            yield singleclientconnector.disconnect()
        del self.singleclientconnectors
568 def got_conn(self, conn):
569 if conn.nonce in self.peers:
570 raise ValueError('already have peer')
571 self.peers[conn.nonce] = conn
573 print '%s connection to peer %s:%i established. p2pool version: %i %r' % ('Incoming' if conn.incoming else 'Outgoing', conn.addr[0], conn.addr[1], conn.other_version, conn.other_sub_version)
575 def lost_conn(self, conn, reason):
576 if conn.nonce not in self.peers:
577 raise ValueError('''don't have peer''')
578 if conn is not self.peers[conn.nonce]:
579 raise ValueError('wrong conn')
580 del self.peers[conn.nonce]
582 print 'Lost peer %s:%i - %s' % (conn.addr[0], conn.addr[1], reason.getErrorMessage())
    def got_addr(self, (host, port), services, timestamp):
        """Merge an advertised peer address into addr_store.

        Stored value is the tuple (services, first_seen, last_seen).
        """
        if (host, port) in self.addr_store:
            old_services, old_first_seen, old_last_seen = self.addr_store[host, port]
            # Known address: refresh services, keep first_seen, and only move
            # last_seen forward.
            self.addr_store[host, port] = services, old_first_seen, max(old_last_seen, timestamp)
        # NOTE(review): the `else:` introducing the new-address branch below
        # is elided from this listing.
            self.addr_store[host, port] = services, timestamp, timestamp
    # NOTE(review): these look like overridable debug-print stubs for the
    # share/block event hooks — confirm the real handlers are attached by a
    # subclass or by attribute assignment elsewhere.
    def handle_shares(self, shares, peer):
        print 'handle_shares', (shares, peer)
    
    def handle_share_hashes(self, hashes, peer):
        print 'handle_share_hashes', (hashes, peer)
    
    def handle_get_shares(self, hashes, parents, stops, peer):
        print 'handle_get_shares', (hashes, parents, stops, peer)
    
    def handle_bestblock(self, header, peer):
        print 'handle_bestblock', header
    def get_good_peers(self, max_count):
        """Return stored addresses ranked by a randomized freshness weight.

        NOTE(review): lines of this method — including the definition of `t`
        used in the key and the expression's tail — are elided from this
        listing / run past the visible portion.
        """
        return [x[0] for x in sorted(self.addr_store.iteritems(), key=lambda (k, (services, first_seen, last_seen)):
            -math.log(max(3600, last_seen - first_seen))/math.log(max(3600, t - last_seen))*random.expovariate(1)