penalization and sending txs ahead of shares
[p2pool.git] / p2pool / p2p.py
from __future__ import division

import math
import random
import time

from twisted.internet import defer, protocol, reactor
from twisted.python import failure, log

import p2pool
from p2pool import data as p2pool_data
from p2pool.bitcoin import data as bitcoin_data
from p2pool.util import deferral, p2protocol, pack, variable

class PeerMisbehavingError(Exception):
    pass

def fragment(f, **kwargs):
    try:
        return f(**kwargs)
    except p2protocol.TooLong:
        fragment(f, **dict((k, v[:len(v)//2]) for k, v in kwargs.iteritems()))
        return fragment(f, **dict((k, v[len(v)//2:]) for k, v in kwargs.iteritems()))

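# fragment() works around p2protocol.TooLong, which is raised when a serialized
# packet would exceed the maximum message size: it retries the send with every
# keyword sequence cut in half, recursing until each piece fits. All kwargs are
# assumed to be parallel sequences of the same length, since they are all
# sliced at the same midpoint. For example, if
# fragment(self.send_shares, shares=[s1, s2, s3, s4]) is too long, it is
# retried as two calls with shares=[s1, s2] and shares=[s3, s4].
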
class Protocol(p2protocol.Protocol):
    def __init__(self, node, incoming):
        p2protocol.Protocol.__init__(self, node.net.PREFIX, 1000000, node.traffic_happened)
        self.node = node
        self.incoming = incoming

        self.other_version = None
        self.connected2 = False

    def connectionMade(self):
        p2protocol.Protocol.connectionMade(self)

        self.factory.proto_made_connection(self)

        self.connection_lost_event = variable.Event()

        self.addr = self.transport.getPeer().host, self.transport.getPeer().port

        self.send_version(
            version=4,
            services=0,
            addr_to=dict(
                services=0,
                address=self.transport.getPeer().host,
                port=self.transport.getPeer().port,
            ),
            addr_from=dict(
                services=0,
                address=self.transport.getHost().host,
                port=self.transport.getHost().port,
            ),
            nonce=self.node.nonce,
            sub_version=p2pool.__version__,
            mode=1,
            best_share_hash=self.node.best_share_hash_func(),
        )

        self.timeout_delayed = reactor.callLater(10, self._connect_timeout)

        self.get_shares = deferral.GenericDeferrer(
            max_id=2**256,
            func=lambda id, hashes, parents, stops: self.send_sharereq(id=id, hashes=hashes, parents=parents, stops=stops),
            timeout=15,
            on_timeout=self.transport.loseConnection,
        )

        self.remote_tx_hashes = set() # view of peer's known_txs # not actually initially empty, but sending txs instead of tx hashes won't hurt
        self.remembered_txs = {} # view of peer's mining_txs

    def _connect_timeout(self):
        self.timeout_delayed = None
        print 'Handshake timed out, disconnecting from %s:%i' % self.addr
        self.transport.loseConnection()

    def packetReceived(self, command, payload2):
        try:
            if command != 'version' and not self.connected2:
                raise PeerMisbehavingError('first message was not version message')
            p2protocol.Protocol.packetReceived(self, command, payload2)
        except PeerMisbehavingError, e:
            print 'Peer %s:%i misbehaving, will drop and ban. Reason:' % self.addr, e.message
            self.badPeerHappened()

    def badPeerHappened(self):
        if p2pool.DEBUG:
            print "Bad peer banned:", self.addr
        self.transport.loseConnection()
        self.node.bans[self.transport.getPeer().host] = time.time() + 60*60

    def _timeout(self):
        self.timeout_delayed = None
        print 'Connection timed out, disconnecting from %s:%i' % self.addr
        self.transport.loseConnection()

    message_version = pack.ComposedType([
        ('version', pack.IntType(32)),
        ('services', pack.IntType(64)),
        ('addr_to', bitcoin_data.address_type),
        ('addr_from', bitcoin_data.address_type),
        ('nonce', pack.IntType(64)),
        ('sub_version', pack.VarStrType()),
        ('mode', pack.IntType(32)), # always 1 for legacy compatibility
        ('best_share_hash', pack.PossiblyNoneType(0, pack.IntType(256))),
    ])
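    # Handshake overview: each side sends its own 'version' message from
    # connectionMade() and expects the peer's 'version' as the first packet
    # (packetReceived() above drops and bans peers that send anything else
    # first). handle_version() then records the peer's version, sub_version
    # and services, detects self-connections and duplicate peers via the
    # random 64-bit nonce, and only after that marks the connection as fully
    # established (connected2).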
    def handle_version(self, version, services, addr_to, addr_from, nonce, sub_version, mode, best_share_hash):
        if self.other_version is not None:
            raise PeerMisbehavingError('more than one version message')
        if version < 4:
            raise PeerMisbehavingError('peer too old')

        self.other_version = version
        self.other_sub_version = sub_version[:512]
        self.other_services = services

        if nonce == self.node.nonce:
            raise PeerMisbehavingError('was connected to self')
        if nonce in self.node.peers:
            if p2pool.DEBUG:
                print 'Detected duplicate connection, disconnecting from %s:%i' % self.addr
            self.transport.loseConnection()
            return

        self.nonce = nonce
        self.connected2 = True

        self.timeout_delayed.cancel()
        self.timeout_delayed = reactor.callLater(100, self._timeout)

        old_dataReceived = self.dataReceived
        def new_dataReceived(data):
            if self.timeout_delayed is not None:
                self.timeout_delayed.reset(100)
            old_dataReceived(data)
        self.dataReceived = new_dataReceived

        self.factory.proto_connected(self)

        self._stop_thread = deferral.run_repeatedly(lambda: [
            self.send_ping(),
        random.expovariate(1/100)][-1])

        self._stop_thread2 = deferral.run_repeatedly(lambda: [
            self.send_addrme(port=self.node.port),
        random.expovariate(1/(100*len(self.node.peers) + 1))][-1])
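        # deferral.run_repeatedly() calls the lambda and uses its return value
        # as the delay before the next call, so pings go out roughly every 100
        # seconds on average (exponentially distributed) and addrme
        # announcements are spaced out further as the number of connected
        # peers grows.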

        if best_share_hash is not None:
            self.node.handle_share_hashes([best_share_hash], self)

        def update_remote_view_of_my_known_txs(before, after):
            added = set(after) - set(before)
            removed = set(before) - set(after)
            if added:
                self.send_have_tx(tx_hashes=list(added))
            if removed:
                self.send_losing_tx(tx_hashes=list(removed))
                # XXX cache locally
        watch_id = self.node.known_txs_var.transitioned.watch(update_remote_view_of_my_known_txs)
        self.connection_lost_event.watch(lambda: self.node.known_txs_var.transitioned.unwatch(watch_id))

        self.send_have_tx(tx_hashes=self.node.known_txs_var.value.keys())

        def update_remote_view_of_my_mining_txs(before, after):
            added = set(after) - set(before)
            removed = set(before) - set(after)
            if added:
                fragment(self.send_remember_tx, tx_hashes=[x for x in added if x in self.remote_tx_hashes], txs=[after[x] for x in added if x not in self.remote_tx_hashes])
            if removed:
                self.send_forget_tx(tx_hashes=removed)
        watch_id2 = self.node.mining_txs_var.transitioned.watch(update_remote_view_of_my_mining_txs)
        self.connection_lost_event.watch(lambda: self.node.mining_txs_var.transitioned.unwatch(watch_id2))

        fragment(self.send_remember_tx, tx_hashes=[], txs=self.node.mining_txs_var.value.values())

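    # Transaction relay protocol, as used above and in sendShares() below:
    #   have_tx / losing_tx   advertise which tx hashes this node's known_txs
    #                         currently holds; the peer mirrors them into its
    #                         remote_tx_hashes set.
    #   remember_tx           pushes transactions (by hash if the peer already
    #                         has them, in full otherwise) that the peer must
    #                         keep because upcoming shares reference them.
    #   forget_tx             releases those transactions once the shares that
    #                         needed them have been sent.
    # This is what allows shares to be transmitted without embedding their
    # transactions ("sending txs ahead of shares").
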
    message_ping = pack.ComposedType([])
    def handle_ping(self):
        pass

    message_addrme = pack.ComposedType([
        ('port', pack.IntType(16)),
    ])
    def handle_addrme(self, port):
        host = self.transport.getPeer().host
        #print 'addrme from', host, port
        if host == '127.0.0.1':
            if random.random() < .8 and self.node.peers:
                random.choice(self.node.peers.values()).send_addrme(port=port) # services...
        else:
            self.node.got_addr((self.transport.getPeer().host, port), self.other_services, int(time.time()))
            if random.random() < .8 and self.node.peers:
                random.choice(self.node.peers.values()).send_addrs(addrs=[
                    dict(
                        address=dict(
                            services=self.other_services,
                            address=host,
                            port=port,
                        ),
                        timestamp=int(time.time()),
                    ),
                ])

    message_addrs = pack.ComposedType([
        ('addrs', pack.ListType(pack.ComposedType([
            ('timestamp', pack.IntType(64)),
            ('address', bitcoin_data.address_type),
        ]))),
    ])
    def handle_addrs(self, addrs):
        for addr_record in addrs:
            self.node.got_addr((addr_record['address']['address'], addr_record['address']['port']), addr_record['address']['services'], min(int(time.time()), addr_record['timestamp']))
            if random.random() < .8 and self.node.peers:
                random.choice(self.node.peers.values()).send_addrs(addrs=[addr_record])

    message_getaddrs = pack.ComposedType([
        ('count', pack.IntType(32)),
    ])
    def handle_getaddrs(self, count):
        if count > 100:
            count = 100
        self.send_addrs(addrs=[
            dict(
                timestamp=int(self.node.addr_store[host, port][2]),
                address=dict(
                    services=self.node.addr_store[host, port][0],
                    address=host,
                    port=port,
                ),
            ) for host, port in
            self.node.get_good_peers(count)
        ])

    message_shares = pack.ComposedType([
        ('shares', pack.ListType(p2pool_data.share_type)),
    ])
    def handle_shares(self, shares):
        self.node.handle_shares([p2pool_data.load_share(share, self.node.net, self) for share in shares if share['type'] not in [6, 7]], self)

    def sendShares(self, shares, tracker, known_txs, include_txs_with=[]):
        if not shares:
            return defer.succeed(None)

        tx_hashes = set()
        for share in shares:
            if share.hash in include_txs_with:
                tx_hashes.update(share.get_other_tx_hashes(tracker))

        hashes_to_send = [x for x in tx_hashes if x not in self.node.mining_txs_var.value]

        fragment(self.send_remember_tx, tx_hashes=[x for x in hashes_to_send if x in self.remote_tx_hashes], txs=[known_txs[x] for x in hashes_to_send if x not in self.remote_tx_hashes and x in known_txs]) # last one required?
        fragment(self.send_shares, shares=[share.as_share() for share in shares])
        res = self.send_forget_tx(tx_hashes=hashes_to_send)

        return res

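    # sendShares() implements the "txs ahead of shares" ordering: remember_tx
    # first, so the peer holds every transaction the shares reference, then
    # the shares themselves, then forget_tx so the peer can drop the
    # remembered transactions again. Transactions already in the peer's view
    # of mining_txs are skipped entirely, and ones the peer has advertised via
    # have_tx are sent as hashes rather than full transactions.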

    message_sharereq = pack.ComposedType([
        ('id', pack.IntType(256)),
        ('hashes', pack.ListType(pack.IntType(256))),
        ('parents', pack.VarIntType()),
        ('stops', pack.ListType(pack.IntType(256))),
    ])
    def handle_sharereq(self, id, hashes, parents, stops):
        shares = self.node.handle_get_shares(hashes, parents, stops, self)
        try:
            self.send_sharereply(id=id, result='good', shares=[share.as_share() for share in shares])
        except p2protocol.TooLong:
            self.send_sharereply(id=id, result='too long', shares=[])

    message_sharereply = pack.ComposedType([
        ('id', pack.IntType(256)),
        ('result', pack.EnumType(pack.VarIntType(), {0: 'good', 1: 'too long', 2: 'unk2', 3: 'unk3', 4: 'unk4', 5: 'unk5', 6: 'unk6'})),
        ('shares', pack.ListType(p2pool_data.share_type)),
    ])
    def handle_sharereply(self, id, result, shares):
        if result == 'good':
            res = [p2pool_data.load_share(share, self.node.net, self) for share in shares if share['type'] not in [6, 7]]
        else:
            res = failure.Failure("sharereply result: " + result)
        self.get_shares.got_response(id, res)


    message_bestblock = pack.ComposedType([
        ('header', bitcoin_data.block_header_type),
    ])
    def handle_bestblock(self, header):
        self.node.handle_bestblock(header, self)


    message_have_tx = pack.ComposedType([
        ('tx_hashes', pack.ListType(pack.IntType(256))),
    ])
    def handle_have_tx(self, tx_hashes):
        self.remote_tx_hashes.update(tx_hashes)
    message_losing_tx = pack.ComposedType([
        ('tx_hashes', pack.ListType(pack.IntType(256))),
    ])
    def handle_losing_tx(self, tx_hashes):
        self.remote_tx_hashes.difference_update(tx_hashes)


    message_remember_tx = pack.ComposedType([
        ('tx_hashes', pack.ListType(pack.IntType(256))),
        ('txs', pack.ListType(bitcoin_data.tx_type)),
    ])
    def handle_remember_tx(self, tx_hashes, txs):
        for tx_hash in tx_hashes:
            if tx_hash not in self.remembered_txs:
                self.remembered_txs[tx_hash] = self.node.known_txs_var.value[tx_hash]
        new_known_txs = dict(self.node.known_txs_var.value)
        for tx in txs:
            tx_hash = bitcoin_data.hash256(bitcoin_data.tx_type.pack(tx))
            if tx_hash not in self.remembered_txs:
                self.remembered_txs[tx_hash] = tx
            new_known_txs[tx_hash] = tx
        self.node.known_txs_var.set(new_known_txs)
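    # handle_remember_tx() keeps the peer's pushed transactions in
    # remembered_txs and also merges them into the node-wide known_txs_var, so
    # that shares arriving immediately afterwards can resolve their tx hashes;
    # handle_forget_tx() below drops them from remembered_txs again.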
    message_forget_tx = pack.ComposedType([
        ('tx_hashes', pack.ListType(pack.IntType(256))),
    ])
    def handle_forget_tx(self, tx_hashes):
        for tx_hash in tx_hashes:
            del self.remembered_txs[tx_hash]


    def connectionLost(self, reason):
        self.connection_lost_event.happened()
        if self.timeout_delayed is not None:
            self.timeout_delayed.cancel()
        if self.connected2:
            self.factory.proto_disconnected(self, reason)
            self._stop_thread()
            self._stop_thread2()
            self.connected2 = False
        self.factory.proto_lost_connection(self, reason)
        if p2pool.DEBUG:
            print "Peer connection lost:", self.addr, reason
        self.get_shares.respond_all(reason)

    @defer.inlineCallbacks
    def do_ping(self):
        start = reactor.seconds()
        yield self.get_shares(hashes=[0], parents=0, stops=[])
        end = reactor.seconds()
        defer.returnValue(end - start)

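# do_ping() measures round-trip time by issuing a sharereq for a dummy hash and
# timing how long the reply takes. A minimal sketch of how a caller might use
# it (the 'peer' variable is hypothetical; any connected Protocol works):
#
#     @defer.inlineCallbacks
#     def print_latency(peer):
#         rtt = yield peer.do_ping()
#         print 'peer %s:%i round trip: %.3fs' % (peer.addr[0], peer.addr[1], rtt)
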
class ServerFactory(protocol.ServerFactory):
    def __init__(self, node, max_conns):
        self.node = node
        self.max_conns = max_conns

        self.conns = {}
        self.running = False

    def buildProtocol(self, addr):
        if sum(self.conns.itervalues()) >= self.max_conns or self.conns.get(self._host_to_ident(addr.host), 0) >= 3:
            return None
        if addr.host in self.node.bans and self.node.bans[addr.host] > time.time():
            return None
        p = Protocol(self.node, True)
        p.factory = self
        if p2pool.DEBUG:
            print "Got peer connection from:", addr
        return p

    def _host_to_ident(self, host):
        a, b, c, d = host.split('.')
        return a, b
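    # _host_to_ident() collapses an IPv4 address to its first two octets, so
    # the per-peer limit in buildProtocol() above is enforced per /16 network
    # rather than per individual host (IPv6 addresses are not handled here).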

    def proto_made_connection(self, proto):
        ident = self._host_to_ident(proto.transport.getPeer().host)
        self.conns[ident] = self.conns.get(ident, 0) + 1
    def proto_lost_connection(self, proto, reason):
        ident = self._host_to_ident(proto.transport.getPeer().host)
        self.conns[ident] -= 1
        if not self.conns[ident]:
            del self.conns[ident]

    def proto_connected(self, proto):
        self.node.got_conn(proto)
    def proto_disconnected(self, proto, reason):
        self.node.lost_conn(proto, reason)

    def start(self):
        assert not self.running
        self.running = True

        def attempt_listen():
            if self.running:
                self.listen_port = reactor.listenTCP(self.node.port, self)
        deferral.retry('Error binding to P2P port:', traceback=False)(attempt_listen)()

    def stop(self):
        assert self.running
        self.running = False

        return self.listen_port.stopListening()

class ClientFactory(protocol.ClientFactory):
    def __init__(self, node, desired_conns, max_attempts):
        self.node = node
        self.desired_conns = desired_conns
        self.max_attempts = max_attempts

        self.attempts = set()
        self.conns = set()
        self.running = False

    def _host_to_ident(self, host):
        a, b, c, d = host.split('.')
        return a, b

    def buildProtocol(self, addr):
        p = Protocol(self.node, False)
        p.factory = self
        return p

    def startedConnecting(self, connector):
        ident = self._host_to_ident(connector.getDestination().host)
        if ident in self.attempts:
            raise AssertionError('already have attempt')
        self.attempts.add(ident)

    def clientConnectionFailed(self, connector, reason):
        self.attempts.remove(self._host_to_ident(connector.getDestination().host))

    def clientConnectionLost(self, connector, reason):
        self.attempts.remove(self._host_to_ident(connector.getDestination().host))

    def proto_made_connection(self, proto):
        pass
    def proto_lost_connection(self, proto, reason):
        pass

    def proto_connected(self, proto):
        self.conns.add(proto)
        self.node.got_conn(proto)
    def proto_disconnected(self, proto, reason):
        self.conns.remove(proto)
        self.node.lost_conn(proto, reason)

    def start(self):
        assert not self.running
        self.running = True
        self._stop_thinking = deferral.run_repeatedly(self._think)
    def stop(self):
        assert self.running
        self.running = False
        self._stop_thinking()

    def _think(self):
        try:
            if len(self.conns) < self.desired_conns and len(self.attempts) < self.max_attempts and self.node.addr_store:
                (host, port), = self.node.get_good_peers(1)

                if self._host_to_ident(host) in self.attempts:
                    pass
                elif host in self.node.bans and self.node.bans[host] > time.time():
                    pass
                else:
                    #print 'Trying to connect to', host, port
                    reactor.connectTCP(host, port, self, timeout=5)
        except:
            log.err()

        return random.expovariate(1/1)

class SingleClientFactory(protocol.ReconnectingClientFactory):
    def __init__(self, node):
        self.node = node

    def buildProtocol(self, addr):
        p = Protocol(self.node, incoming=False)
        p.factory = self
        return p

    def proto_made_connection(self, proto):
        pass
    def proto_lost_connection(self, proto, reason):
        pass

    def proto_connected(self, proto):
        self.resetDelay()
        self.node.got_conn(proto)
    def proto_disconnected(self, proto, reason):
        self.node.lost_conn(proto, reason)

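# Three factories cover the three ways connections come about: ServerFactory
# accepts incoming connections (with per-/16 and ban checks), ClientFactory
# dials peers picked from the address store until desired_conns is reached,
# and SingleClientFactory keeps a persistent, auto-reconnecting link to each
# explicitly configured address in Node.connect_addrs.
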
class Node(object):
    def __init__(self, best_share_hash_func, port, net, addr_store={}, connect_addrs=set(), desired_outgoing_conns=10, max_outgoing_attempts=30, max_incoming_conns=50, preferred_storage=1000, traffic_happened=variable.Event(), known_txs_var=variable.Variable({}), mining_txs_var=variable.Variable({})):
        self.best_share_hash_func = best_share_hash_func
        self.port = port
        self.net = net
        self.addr_store = dict(addr_store)
        self.connect_addrs = connect_addrs
        self.preferred_storage = preferred_storage
        self.traffic_happened = traffic_happened
        self.known_txs_var = known_txs_var
        self.mining_txs_var = mining_txs_var

        self.nonce = random.randrange(2**64)
        self.peers = {}
        self.bans = {} # address -> end_time
        self.clientfactory = ClientFactory(self, desired_outgoing_conns, max_outgoing_attempts)
        self.serverfactory = ServerFactory(self, max_incoming_conns)
        self.running = False

    def start(self):
        if self.running:
            raise ValueError('already running')

        self.clientfactory.start()
        self.serverfactory.start()
        self.singleclientconnectors = [reactor.connectTCP(addr, port, SingleClientFactory(self)) for addr, port in self.connect_addrs]

        self.running = True

        self._stop_thinking = deferral.run_repeatedly(self._think)

    def _think(self):
        try:
            if len(self.addr_store) < self.preferred_storage and self.peers:
                random.choice(self.peers.values()).send_getaddrs(count=8)
        except:
            log.err()

        return random.expovariate(1/20)

    @defer.inlineCallbacks
    def stop(self):
        if not self.running:
            raise ValueError('already stopped')

        self.running = False

        self._stop_thinking()
        yield self.clientfactory.stop()
        yield self.serverfactory.stop()
        for singleclientconnector in self.singleclientconnectors:
            yield singleclientconnector.factory.stopTrying()
            yield singleclientconnector.disconnect()
        del self.singleclientconnectors

    def got_conn(self, conn):
        if conn.nonce in self.peers:
            raise ValueError('already have peer')
        self.peers[conn.nonce] = conn

        print '%s connection to peer %s:%i established. p2pool version: %i %r' % ('Incoming' if conn.incoming else 'Outgoing', conn.addr[0], conn.addr[1], conn.other_version, conn.other_sub_version)

    def lost_conn(self, conn, reason):
        if conn.nonce not in self.peers:
            raise ValueError('''don't have peer''')
        if conn is not self.peers[conn.nonce]:
            raise ValueError('wrong conn')
        del self.peers[conn.nonce]

        print 'Lost peer %s:%i - %s' % (conn.addr[0], conn.addr[1], reason.getErrorMessage())


    def got_addr(self, (host, port), services, timestamp):
        if (host, port) in self.addr_store:
            old_services, old_first_seen, old_last_seen = self.addr_store[host, port]
            self.addr_store[host, port] = services, old_first_seen, max(old_last_seen, timestamp)
        else:
            self.addr_store[host, port] = services, timestamp, timestamp

    def handle_shares(self, shares, peer):
        print 'handle_shares', (shares, peer)

    def handle_share_hashes(self, hashes, peer):
        print 'handle_share_hashes', (hashes, peer)

    def handle_get_shares(self, hashes, parents, stops, peer):
        print 'handle_get_shares', (hashes, parents, stops, peer)

    def handle_bestblock(self, header, peer):
        print 'handle_bestblock', header

    def get_good_peers(self, max_count):
        t = time.time()
        return [x[0] for x in sorted(self.addr_store.iteritems(), key=lambda (k, (services, first_seen, last_seen)):
            -math.log(max(3600, last_seen - first_seen))/math.log(max(3600, t - last_seen))*random.expovariate(1)
        )][:max_count]
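    # get_good_peers() prefers addresses that have been known for a long time
    # (last_seen - first_seen) relative to how stale they are (t - last_seen),
    # with both terms floored at one hour and the score jittered by
    # random.expovariate(1) so repeated calls don't always pick the same peers.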