Added some sanity checks in p2p transaction handling.
[p2pool.git] / p2pool / p2p.py
from __future__ import division

import math
import random
import sys
import time

from twisted.internet import defer, protocol, reactor
from twisted.python import failure, log

import p2pool
from p2pool import data as p2pool_data
from p2pool.bitcoin import data as bitcoin_data
from p2pool.util import deferral, p2protocol, pack, variable
class PeerMisbehavingError(Exception):
    '''Raised when a peer violates the protocol; handled in
    Protocol.packetReceived, which drops the connection and bans the
    peer's address for an hour.'''
    pass
17
18
def fragment(f, **kwargs):
    '''Call the send function f with the given keyword arguments; if the
    resulting message is too long for one packet, split every argument
    list in half and send the halves as separate messages, recursively.
    
    Returns f's result for the unfragmented case, or the result of
    sending the second half when fragmentation occurred.
    '''
    try:
        return f(**kwargs)
    except p2protocol.TooLong:
        # Bug fix: the original called an undefined name `att` here, so any
        # oversized message raised NameError instead of being fragmented.
        # Recurse with each half; termination follows because each level
        # halves every argument list.
        fragment(f, **dict((k, v[:len(v)//2]) for k, v in kwargs.iteritems()))
        return fragment(f, **dict((k, v[len(v)//2:]) for k, v in kwargs.iteritems()))
25
class Protocol(p2protocol.Protocol):
    '''One connection to another p2pool peer.
    
    Handles the version handshake, keepalive pings, address gossip, share
    exchange, and (for peers speaking protocol version >= 8) incremental
    transaction propagation, where each side asks the other to "remember"
    a bounded set of transactions referenced by shares in flight.
    '''
    # Upper bound, in packed bytes, on the transactions either side may ask
    # the other to remember; exceeding it is treated as misbehavior.
    max_remembered_txs_size = 2500000
    
    def __init__(self, node, incoming):
        # 1000000 is the maximum packet payload length accepted from this peer.
        p2protocol.Protocol.__init__(self, node.net.PREFIX, 1000000, node.traffic_happened)
        self.node = node
        self.incoming = incoming # True if the peer connected to us, False if we dialed out
        
        self.other_version = None # set by handle_version; None means no handshake yet
        self.connected2 = False # True once the version handshake has completed
    
    def connectionMade(self):
        '''Send our version message and set up per-connection state.'''
        p2protocol.Protocol.connectionMade(self)
        
        self.factory.proto_made_connection(self)
        
        self.connection_lost_event = variable.Event()
        
        self.addr = self.transport.getPeer().host, self.transport.getPeer().port
        
        self.send_version(
            version=8,
            services=0,
            addr_to=dict(
                services=0,
                address=self.transport.getPeer().host,
                port=self.transport.getPeer().port,
            ),
            addr_from=dict(
                services=0,
                address=self.transport.getHost().host,
                port=self.transport.getHost().port,
            ),
            nonce=self.node.nonce,
            sub_version=p2pool.__version__,
            mode=1,
            best_share_hash=self.node.best_share_hash_func(),
        )
        
        # Drop the connection if the handshake doesn't complete within 10s.
        self.timeout_delayed = reactor.callLater(10, self._connect_timeout)
        
        # Matches sharereq requests to sharereply responses by random id.
        self.get_shares = deferral.GenericDeferrer(
            max_id=2**256,
            func=lambda id, hashes, parents, stops: self.send_sharereq(id=id, hashes=hashes, parents=parents, stops=stops),
            timeout=15,
            on_timeout=self.transport.loseConnection,
        )
        
        self.remote_tx_hashes = set() # view of peer's known_txs # not actually initially empty, but sending txs instead of tx hashes won't hurt
        self.remote_remembered_txs_size = 0 # packed bytes of txs we've asked the peer to remember
        
        self.remembered_txs = {} # view of peer's mining_txs
        self.remembered_txs_size = 0 # packed bytes of the txs in remembered_txs
        self.known_txs_cache = {} # recently-forgotten txs, kept briefly for in-flight references
    
    def _connect_timeout(self):
        # Handshake deadline fired before a version message arrived.
        self.timeout_delayed = None
        print 'Handshake timed out, disconnecting from %s:%i' % self.addr
        self.transport.loseConnection()
    
    def packetReceived(self, command, payload2):
        '''Dispatch one packet, enforcing that the first message is 'version'
        and converting PeerMisbehavingError into a drop-and-ban.'''
        try:
            if command != 'version' and not self.connected2:
                raise PeerMisbehavingError('first message was not version message')
            p2protocol.Protocol.packetReceived(self, command, payload2)
        except PeerMisbehavingError, e:
            print 'Peer %s:%i misbehaving, will drop and ban. Reason:' % self.addr, e.message
            self.badPeerHappened()
    
    def badPeerHappened(self):
        '''Disconnect the peer and ban its address for one hour.'''
        if p2pool.DEBUG:
            print "Bad peer banned:", self.addr
        self.transport.loseConnection()
        self.node.bans[self.transport.getPeer().host] = time.time() + 60*60
    
    def _timeout(self):
        # No data received for 100s after the handshake; assume the peer is gone.
        self.timeout_delayed = None
        print 'Connection timed out, disconnecting from %s:%i' % self.addr
        self.transport.loseConnection()
    
    message_version = pack.ComposedType([
        ('version', pack.IntType(32)),
        ('services', pack.IntType(64)),
        ('addr_to', bitcoin_data.address_type),
        ('addr_from', bitcoin_data.address_type),
        ('nonce', pack.IntType(64)),
        ('sub_version', pack.VarStrType()),
        ('mode', pack.IntType(32)), # always 1 for legacy compatibility
        ('best_share_hash', pack.PossiblyNoneType(0, pack.IntType(256))),
    ])
    def handle_version(self, version, services, addr_to, addr_from, nonce, sub_version, mode, best_share_hash):
        '''Complete the handshake and start per-peer background tasks.'''
        if self.other_version is not None:
            raise PeerMisbehavingError('more than one version message')
        if version < 4:
            raise PeerMisbehavingError('peer too old')
        
        self.other_version = version
        self.other_sub_version = sub_version[:512] # truncated to bound memory/log size
        self.other_services = services
        
        if nonce == self.node.nonce:
            raise PeerMisbehavingError('was connected to self')
        if nonce in self.node.peers:
            # Another connection to the same node already exists; keep the old one.
            if p2pool.DEBUG:
                print 'Detected duplicate connection, disconnecting from %s:%i' % self.addr
            self.transport.loseConnection()
            return
        
        self.nonce = nonce
        self.connected2 = True
        
        # Swap the 10s handshake timeout for a 100s idle timeout...
        self.timeout_delayed.cancel()
        self.timeout_delayed = reactor.callLater(100, self._timeout)
        
        # ...and reset it whenever any data at all arrives from the peer.
        old_dataReceived = self.dataReceived
        def new_dataReceived(data):
            if self.timeout_delayed is not None:
                self.timeout_delayed.reset(100)
            old_dataReceived(data)
        self.dataReceived = new_dataReceived
        
        self.factory.proto_connected(self)
        
        # Keepalive pings at random intervals averaging 100 seconds.
        self._stop_thread = deferral.run_repeatedly(lambda: [
            self.send_ping(),
        random.expovariate(1/100)][-1])
        
        # Advertise our listening port; less often the more peers we have.
        self._stop_thread2 = deferral.run_repeatedly(lambda: [
            self.send_addrme(port=self.node.port),
        random.expovariate(1/(100*len(self.node.peers) + 1))][-1])
        
        if best_share_hash is not None:
            self.node.handle_share_hashes([best_share_hash], self)
        
        # Everything below is the transaction-propagation protocol, which
        # requires version 8 on both sides.
        if self.other_version < 8:
            return
        
        def update_remote_view_of_my_known_txs(before, after):
            # Mirror changes to our known_txs to the peer as have/losing messages.
            added = set(after) - set(before)
            removed = set(before) - set(after)
            if added:
                self.send_have_tx(tx_hashes=list(added))
            if removed:
                self.send_losing_tx(tx_hashes=list(removed))
                
                # cache forgotten txs here for a little while so latency of "losing_tx" packets doesn't cause problems
                key = max(self.known_txs_cache) + 1 if self.known_txs_cache else 0
                self.known_txs_cache[key] = dict((h, before[h]) for h in removed)
                reactor.callLater(20, self.known_txs_cache.pop, key)
        watch_id = self.node.known_txs_var.transitioned.watch(update_remote_view_of_my_known_txs)
        self.connection_lost_event.watch(lambda: self.node.known_txs_var.transitioned.unwatch(watch_id))
        
        self.send_have_tx(tx_hashes=self.node.known_txs_var.value.keys())
        
        def update_remote_view_of_my_mining_txs(before, after):
            # Keep the peer's remembered set in sync with our mining_txs.
            added = set(after) - set(before)
            removed = set(before) - set(after)
            if added:
                self.remote_remembered_txs_size += sum(len(bitcoin_data.tx_type.pack(after[x])) for x in added)
                assert self.remote_remembered_txs_size <= self.max_remembered_txs_size
                # Send only hashes for txs the peer already has; full txs otherwise.
                fragment(self.send_remember_tx, tx_hashes=[x for x in added if x in self.remote_tx_hashes], txs=[after[x] for x in added if x not in self.remote_tx_hashes])
            if removed:
                self.send_forget_tx(tx_hashes=removed)
                self.remote_remembered_txs_size -= sum(len(bitcoin_data.tx_type.pack(before[x])) for x in removed)
        watch_id2 = self.node.mining_txs_var.transitioned.watch(update_remote_view_of_my_mining_txs)
        self.connection_lost_event.watch(lambda: self.node.mining_txs_var.transitioned.unwatch(watch_id2))
        
        # Prime the peer with our current mining transactions.
        self.remote_remembered_txs_size += sum(len(bitcoin_data.tx_type.pack(x)) for x in self.node.mining_txs_var.value.values())
        assert self.remote_remembered_txs_size <= self.max_remembered_txs_size
        fragment(self.send_remember_tx, tx_hashes=[], txs=self.node.mining_txs_var.value.values())
    
    message_ping = pack.ComposedType([])
    def handle_ping(self):
        # Keepalive; receiving any data already reset the idle timeout.
        pass
    
    message_addrme = pack.ComposedType([
        ('port', pack.IntType(16)),
    ])
    def handle_addrme(self, port):
        '''Record the sender's advertised address and probabilistically gossip it.'''
        host = self.transport.getPeer().host
        #print 'addrme from', host, port
        if host == '127.0.0.1':
            # Loopback address is useless to third parties; just forward the port.
            if random.random() < .8 and self.node.peers:
                random.choice(self.node.peers.values()).send_addrme(port=port) # services...
        else:
            self.node.got_addr((self.transport.getPeer().host, port), self.other_services, int(time.time()))
            if random.random() < .8 and self.node.peers:
                random.choice(self.node.peers.values()).send_addrs(addrs=[
                    dict(
                        address=dict(
                            services=self.other_services,
                            address=host,
                            port=port,
                        ),
                        timestamp=int(time.time()),
                    ),
                ])
    
    message_addrs = pack.ComposedType([
        ('addrs', pack.ListType(pack.ComposedType([
            ('timestamp', pack.IntType(64)),
            ('address', bitcoin_data.address_type),
        ]))),
    ])
    def handle_addrs(self, addrs):
        '''Store received addresses and probabilistically relay each one.'''
        for addr_record in addrs:
            # Timestamp is clamped so peers can't advertise future-dated entries.
            self.node.got_addr((addr_record['address']['address'], addr_record['address']['port']), addr_record['address']['services'], min(int(time.time()), addr_record['timestamp']))
            if random.random() < .8 and self.node.peers:
                random.choice(self.node.peers.values()).send_addrs(addrs=[addr_record])
    
    message_getaddrs = pack.ComposedType([
        ('count', pack.IntType(32)),
    ])
    def handle_getaddrs(self, count):
        '''Reply with up to 100 known-good peer addresses from the store.'''
        if count > 100:
            count = 100
        self.send_addrs(addrs=[
            dict(
                timestamp=int(self.node.addr_store[host, port][2]),
                address=dict(
                    services=self.node.addr_store[host, port][0],
                    address=host,
                    port=port,
                ),
            ) for host, port in
            self.node.get_good_peers(count)
        ])
    
    message_shares = pack.ComposedType([
        ('shares', pack.ListType(p2pool_data.share_type)),
    ])
    def handle_shares(self, shares):
        # Share types 6 and 7 are skipped here -- presumably obsolete share
        # versions; confirm against p2pool.data.
        self.node.handle_shares([p2pool_data.load_share(share, self.node.net, self) for share in shares if share['type'] not in [6, 7]], self)
    
    def sendShares(self, shares, tracker, known_txs, include_txs_with=[]):
        '''Send shares to this peer, first transferring (for v8+ peers) any
        transactions they reference, and forgetting them again afterwards.
        
        NOTE(review): include_txs_with has a mutable default, but it is only
        tested with `in` here, never mutated, so the shared default is harmless.
        '''
        if not shares:
            return defer.succeed(None)
        
        if self.other_version >= 8:
            tx_hashes = set()
            for share in shares:
                if share.hash in include_txs_with:
                    tx_hashes.update(share.get_other_tx_hashes(tracker))
            
            # Skip txs the peer already tracks via our mining set; only known txs can be sent.
            hashes_to_send = [x for x in tx_hashes if x not in self.node.mining_txs_var.value and x in known_txs]
            
            new_remote_remembered_txs_size = self.remote_remembered_txs_size + sum(len(bitcoin_data.tx_type.pack(known_txs[x])) for x in hashes_to_send)
            if new_remote_remembered_txs_size > self.max_remembered_txs_size:
                raise ValueError('shares have too many txs')
            self.remote_remembered_txs_size = new_remote_remembered_txs_size
            
            fragment(self.send_remember_tx, tx_hashes=[x for x in hashes_to_send if x in self.remote_tx_hashes], txs=[known_txs[x] for x in hashes_to_send if x not in self.remote_tx_hashes])
        
        res = fragment(self.send_shares, shares=[share.as_share() for share in shares])
        
        if self.other_version >= 8:
            # The txs were only needed while the peer processed the shares; release them.
            res = self.send_forget_tx(tx_hashes=hashes_to_send)
            
            self.remote_remembered_txs_size -= sum(len(bitcoin_data.tx_type.pack(known_txs[x])) for x in hashes_to_send)
        
        return res
    
    
    message_sharereq = pack.ComposedType([
        ('id', pack.IntType(256)),
        ('hashes', pack.ListType(pack.IntType(256))),
        ('parents', pack.VarIntType()),
        ('stops', pack.ListType(pack.IntType(256))),
    ])
    def handle_sharereq(self, id, hashes, parents, stops):
        '''Serve a peer's share request, replying 'too long' when the result
        doesn't fit in a single packet.'''
        shares = self.node.handle_get_shares(hashes, parents, stops, self)
        try:
            self.send_sharereply(id=id, result='good', shares=[share.as_share() for share in shares])
        except p2protocol.TooLong:
            self.send_sharereply(id=id, result='too long', shares=[])
    
    message_sharereply = pack.ComposedType([
        ('id', pack.IntType(256)),
        ('result', pack.EnumType(pack.VarIntType(), {0: 'good', 1: 'too long', 2: 'unk2', 3: 'unk3', 4: 'unk4', 5: 'unk5', 6: 'unk6'})),
        ('shares', pack.ListType(p2pool_data.share_type)),
    ])
    def handle_sharereply(self, id, result, shares):
        '''Resolve the matching get_shares request with shares or a Failure.'''
        if result == 'good':
            res = [p2pool_data.load_share(share, self.node.net, self) for share in shares if share['type'] not in [6, 7]]
        else:
            res = failure.Failure("sharereply result: " + result)
        self.get_shares.got_response(id, res)
    
    
    message_bestblock = pack.ComposedType([
        ('header', bitcoin_data.block_header_type),
    ])
    def handle_bestblock(self, header):
        self.node.handle_bestblock(header, self)
    
    
    message_have_tx = pack.ComposedType([
        ('tx_hashes', pack.ListType(pack.IntType(256))),
    ])
    def handle_have_tx(self, tx_hashes):
        # NOTE(review): this assert is over peer-controlled data; a misbehaving
        # peer can trigger an AssertionError rather than a PeerMisbehavingError.
        assert self.remote_tx_hashes.isdisjoint(tx_hashes)
        self.remote_tx_hashes.update(tx_hashes)
    message_losing_tx = pack.ComposedType([
        ('tx_hashes', pack.ListType(pack.IntType(256))),
    ])
    def handle_losing_tx(self, tx_hashes):
        # NOTE(review): same caveat as handle_have_tx -- assert on peer data.
        assert self.remote_tx_hashes.issuperset(tx_hashes)
        self.remote_tx_hashes.difference_update(tx_hashes)
    
    
    message_remember_tx = pack.ComposedType([
        ('tx_hashes', pack.ListType(pack.IntType(256))),
        ('txs', pack.ListType(bitcoin_data.tx_type)),
    ])
    def handle_remember_tx(self, tx_hashes, txs):
        '''Store transactions the peer wants us to hold while it sends shares
        referencing them: tx_hashes refer to txs we should already know,
        txs carries full transactions we don't.'''
        for tx_hash in tx_hashes:
            if tx_hash in self.remembered_txs:
                print >>sys.stderr, 'Peer referenced transaction twice, disconnecting'
                self.transport.loseConnection()
                return
            
            if tx_hash in self.node.known_txs_var.value:
                tx = self.node.known_txs_var.value[tx_hash]
            else:
                # Check recently-forgotten txs; the peer may not have processed
                # our losing_tx message yet.
                for cache in self.known_txs_cache.itervalues():
                    if tx_hash in cache:
                        tx = cache[tx_hash]
                        print 'Transaction rescued from peer latency cache!'
                        break
                else:
                    print >>sys.stderr, 'Peer referenced unknown transaction, disconnecting'
                    self.transport.loseConnection()
                    return
            
            self.remembered_txs[tx_hash] = tx
            self.remembered_txs_size += len(bitcoin_data.tx_type.pack(tx))
        new_known_txs = dict(self.node.known_txs_var.value)
        warned = False
        for tx in txs:
            tx_hash = bitcoin_data.hash256(bitcoin_data.tx_type.pack(tx))
            if tx_hash in self.remembered_txs:
                print >>sys.stderr, 'Peer referenced transaction twice, disconnecting'
                self.transport.loseConnection()
                return
            
            if tx_hash in self.node.known_txs_var.value and not warned:
                print 'Peer sent entire transaction that was already received'
                warned = True
            
            self.remembered_txs[tx_hash] = tx
            self.remembered_txs_size += len(bitcoin_data.tx_type.pack(tx))
            new_known_txs[tx_hash] = tx
        self.node.known_txs_var.set(new_known_txs)
        # Enforce the memory bound; raising here drops and bans the peer
        # via packetReceived's PeerMisbehavingError handler.
        if self.remembered_txs_size >= self.max_remembered_txs_size:
            raise PeerMisbehavingError('too much transaction data stored')
    message_forget_tx = pack.ComposedType([
        ('tx_hashes', pack.ListType(pack.IntType(256))),
    ])
    def handle_forget_tx(self, tx_hashes):
        '''Release transactions previously stored via remember_tx.'''
        for tx_hash in tx_hashes:
            self.remembered_txs_size -= len(bitcoin_data.tx_type.pack(self.remembered_txs[tx_hash]))
            assert self.remembered_txs_size >= 0
            del self.remembered_txs[tx_hash]
    
    
    def connectionLost(self, reason):
        '''Tear down watchers, timers, background threads, and pending requests.'''
        self.connection_lost_event.happened()
        if self.timeout_delayed is not None:
            self.timeout_delayed.cancel()
        if self.connected2:
            self.factory.proto_disconnected(self, reason)
            self._stop_thread()
            self._stop_thread2()
            self.connected2 = False
        self.factory.proto_lost_connection(self, reason)
        if p2pool.DEBUG:
            print "Peer connection lost:", self.addr, reason
        self.get_shares.respond_all(reason)
    
    @defer.inlineCallbacks
    def do_ping(self):
        '''Measure round-trip time (seconds) using a trivial sharereq.'''
        start = reactor.seconds()
        yield self.get_shares(hashes=[0], parents=0, stops=[])
        end = reactor.seconds()
        defer.returnValue(end - start)
411
412 class ServerFactory(protocol.ServerFactory):
413     def __init__(self, node, max_conns):
414         self.node = node
415         self.max_conns = max_conns
416         
417         self.conns = {}
418         self.running = False
419     
420     def buildProtocol(self, addr):
421         if sum(self.conns.itervalues()) >= self.max_conns or self.conns.get(self._host_to_ident(addr.host), 0) >= 3:
422             return None
423         if addr.host in self.node.bans and self.node.bans[addr.host] > time.time():
424             return None
425         p = Protocol(self.node, True)
426         p.factory = self
427         if p2pool.DEBUG:
428             print "Got peer connection from:", addr
429         return p
430     
431     def _host_to_ident(self, host):
432         a, b, c, d = host.split('.')
433         return a, b
434     
435     def proto_made_connection(self, proto):
436         ident = self._host_to_ident(proto.transport.getPeer().host)
437         self.conns[ident] = self.conns.get(ident, 0) + 1
438     def proto_lost_connection(self, proto, reason):
439         ident = self._host_to_ident(proto.transport.getPeer().host)
440         self.conns[ident] -= 1
441         if not self.conns[ident]:
442             del self.conns[ident]
443     
444     def proto_connected(self, proto):
445         self.node.got_conn(proto)
446     def proto_disconnected(self, proto, reason):
447         self.node.lost_conn(proto, reason)
448     
449     def start(self):
450         assert not self.running
451         self.running = True
452         
453         def attempt_listen():
454             if self.running:
455                 self.listen_port = reactor.listenTCP(self.node.port, self)
456         deferral.retry('Error binding to P2P port:', traceback=False)(attempt_listen)()
457     
458     def stop(self):
459         assert self.running
460         self.running = False
461         
462         return self.listen_port.stopListening()
463
class ClientFactory(protocol.ClientFactory):
    '''Dials out to peers from the node's address store until the desired
    number of outgoing connections exists, tracking one in-flight attempt
    per /16 network.
    '''
    def __init__(self, node, desired_conns, max_attempts):
        self.node = node
        self.desired_conns = desired_conns
        self.max_attempts = max_attempts
        
        self.attempts = set() # /16 idents with a connection attempt in flight
        self.conns = set() # established Protocol instances
        self.running = False
    
    def _host_to_ident(self, host):
        # Collapse an IPv4 address to its first two octets (/16 network).
        first, second, third, fourth = host.split('.')
        return first, second
    
    def buildProtocol(self, addr):
        proto = Protocol(self.node, False)
        proto.factory = self
        return proto
    
    def startedConnecting(self, connector):
        ident = self._host_to_ident(connector.getDestination().host)
        if ident in self.attempts:
            raise AssertionError('already have attempt')
        self.attempts.add(ident)
    
    def clientConnectionFailed(self, connector, reason):
        self.attempts.remove(self._host_to_ident(connector.getDestination().host))
    
    def clientConnectionLost(self, connector, reason):
        self.attempts.remove(self._host_to_ident(connector.getDestination().host))
    
    def proto_made_connection(self, proto):
        pass
    def proto_lost_connection(self, proto, reason):
        pass
    
    def proto_connected(self, proto):
        self.conns.add(proto)
        self.node.got_conn(proto)
    def proto_disconnected(self, proto, reason):
        self.conns.remove(proto)
        self.node.lost_conn(proto, reason)
    
    def start(self):
        assert not self.running
        self.running = True
        self._stop_thinking = deferral.run_repeatedly(self._think)
    def stop(self):
        assert self.running
        self.running = False
        self._stop_thinking()
    
    def _think(self):
        # Called repeatedly; returns the delay in seconds until the next call.
        try:
            want_more = len(self.conns) < self.desired_conns
            may_attempt = len(self.attempts) < self.max_attempts
            if want_more and may_attempt and self.node.addr_store:
                (host, port), = self.node.get_good_peers(1)
                
                already_trying = self._host_to_ident(host) in self.attempts
                banned = host in self.node.bans and self.node.bans[host] > time.time()
                if not already_trying and not banned:
                    reactor.connectTCP(host, port, self, timeout=5)
        except:
            log.err()
        
        return random.expovariate(1/1)
532
class SingleClientFactory(protocol.ReconnectingClientFactory):
    '''Maintains a connection to one explicitly-configured peer, using the
    base class's backoff to reconnect whenever it drops.
    '''
    def __init__(self, node):
        self.node = node
    
    def buildProtocol(self, addr):
        proto = Protocol(self.node, incoming=False)
        proto.factory = self
        return proto
    
    def proto_made_connection(self, proto):
        pass
    def proto_lost_connection(self, proto, reason):
        pass
    
    def proto_connected(self, proto):
        # Handshake succeeded: reset the reconnect backoff delay.
        self.resetDelay()
        self.node.got_conn(proto)
    def proto_disconnected(self, proto, reason):
        self.node.lost_conn(proto, reason)
552
553 class Node(object):
554     def __init__(self, best_share_hash_func, port, net, addr_store={}, connect_addrs=set(), desired_outgoing_conns=10, max_outgoing_attempts=30, max_incoming_conns=50, preferred_storage=1000, traffic_happened=variable.Event(), known_txs_var=variable.Variable({}), mining_txs_var=variable.Variable({})):
555         self.best_share_hash_func = best_share_hash_func
556         self.port = port
557         self.net = net
558         self.addr_store = dict(addr_store)
559         self.connect_addrs = connect_addrs
560         self.preferred_storage = preferred_storage
561         self.traffic_happened = traffic_happened
562         self.known_txs_var = known_txs_var
563         self.mining_txs_var = mining_txs_var
564         
565         self.nonce = random.randrange(2**64)
566         self.peers = {}
567         self.bans = {} # address -> end_time
568         self.clientfactory = ClientFactory(self, desired_outgoing_conns, max_outgoing_attempts)
569         self.serverfactory = ServerFactory(self, max_incoming_conns)
570         self.running = False
571     
572     def start(self):
573         if self.running:
574             raise ValueError('already running')
575         
576         self.clientfactory.start()
577         self.serverfactory.start()
578         self.singleclientconnectors = [reactor.connectTCP(addr, port, SingleClientFactory(self)) for addr, port in self.connect_addrs]
579         
580         self.running = True
581         
582         self._stop_thinking = deferral.run_repeatedly(self._think)
583     
584     def _think(self):
585         try:
586             if len(self.addr_store) < self.preferred_storage and self.peers:
587                 random.choice(self.peers.values()).send_getaddrs(count=8)
588         except:
589             log.err()
590         
591         return random.expovariate(1/20)
592     
593     @defer.inlineCallbacks
594     def stop(self):
595         if not self.running:
596             raise ValueError('already stopped')
597         
598         self.running = False
599         
600         self._stop_thinking()
601         yield self.clientfactory.stop()
602         yield self.serverfactory.stop()
603         for singleclientconnector in self.singleclientconnectors:
604             yield singleclientconnector.factory.stopTrying()
605             yield singleclientconnector.disconnect()
606         del self.singleclientconnectors
607     
608     def got_conn(self, conn):
609         if conn.nonce in self.peers:
610             raise ValueError('already have peer')
611         self.peers[conn.nonce] = conn
612         
613         print '%s connection to peer %s:%i established. p2pool version: %i %r' % ('Incoming' if conn.incoming else 'Outgoing', conn.addr[0], conn.addr[1], conn.other_version, conn.other_sub_version)
614     
615     def lost_conn(self, conn, reason):
616         if conn.nonce not in self.peers:
617             raise ValueError('''don't have peer''')
618         if conn is not self.peers[conn.nonce]:
619             raise ValueError('wrong conn')
620         del self.peers[conn.nonce]
621         
622         print 'Lost peer %s:%i - %s' % (conn.addr[0], conn.addr[1], reason.getErrorMessage())
623     
624     
625     def got_addr(self, (host, port), services, timestamp):
626         if (host, port) in self.addr_store:
627             old_services, old_first_seen, old_last_seen = self.addr_store[host, port]
628             self.addr_store[host, port] = services, old_first_seen, max(old_last_seen, timestamp)
629         else:
630             self.addr_store[host, port] = services, timestamp, timestamp
631     
632     def handle_shares(self, shares, peer):
633         print 'handle_shares', (shares, peer)
634     
635     def handle_share_hashes(self, hashes, peer):
636         print 'handle_share_hashes', (hashes, peer)
637     
638     def handle_get_shares(self, hashes, parents, stops, peer):
639         print 'handle_get_shares', (hashes, parents, stops, peer)
640     
641     def handle_bestblock(self, header, peer):
642         print 'handle_bestblock', header
643     
644     def get_good_peers(self, max_count):
645         t = time.time()
646         return [x[0] for x in sorted(self.addr_store.iteritems(), key=lambda (k, (services, first_seen, last_seen)):
647             -math.log(max(3600, last_seen - first_seen))/math.log(max(3600, t - last_seen))*random.expovariate(1)
648         )][:max_count]