tx preforwarding working
[p2pool.git] / p2pool / p2p.py
from __future__ import division

import math
import random
import time

from twisted.internet import defer, protocol, reactor
from twisted.python import failure, log

import p2pool
from p2pool import data as p2pool_data
from p2pool.bitcoin import data as bitcoin_data
from p2pool.util import deferral, p2protocol, pack, variable

class PeerMisbehavingError(Exception):
    pass

class Protocol(p2protocol.Protocol):
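    """A single connection to another p2pool peer.
    
    Handles the version handshake, keepalive pings, peer-address gossip
    (addrme/addrs/getaddrs), share propagation (shares/sharereq/sharereply,
    bestblock), and transaction pre-forwarding (have_tx/losing_tx/
    remember_tx/forget_tx). get_shares pairs each outgoing sharereq with the
    sharereply that answers it, matched by id.
    """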
    def __init__(self, node, incoming):
        p2protocol.Protocol.__init__(self, node.net.PREFIX, 1000000, node.traffic_happened)
        self.node = node
        self.incoming = incoming
        
        self.other_version = None
        self.connected2 = False
    
    def connectionMade(self):
        p2protocol.Protocol.connectionMade(self)
        
        self.factory.proto_made_connection(self)
        
        self.connection_lost_event = variable.Event()
        
        self.addr = self.transport.getPeer().host, self.transport.getPeer().port
        
        self.send_version(
            version=4,
            services=0,
            addr_to=dict(
                services=0,
                address=self.transport.getPeer().host,
                port=self.transport.getPeer().port,
            ),
            addr_from=dict(
                services=0,
                address=self.transport.getHost().host,
                port=self.transport.getHost().port,
            ),
            nonce=self.node.nonce,
            sub_version=p2pool.__version__,
            mode=1,
            best_share_hash=self.node.best_share_hash_func(),
        )
        
        self.timeout_delayed = reactor.callLater(10, self._connect_timeout)
        
        self.get_shares = deferral.GenericDeferrer(
            max_id=2**256,
            func=lambda id, hashes, parents, stops: self.send_sharereq(id=id, hashes=hashes, parents=parents, stops=stops),
            timeout=15,
            on_timeout=self.transport.loseConnection,
        )
        
        self.remote_tx_hashes = set() # view of peer's known_txs # not actually initially empty, but sending txs instead of tx hashes won't hurt
        self.remembered_txs = {} # view of peer's mining_txs
    
    def _connect_timeout(self):
        self.timeout_delayed = None
        print 'Handshake timed out, disconnecting from %s:%i' % self.addr
        self.transport.loseConnection()
    
    def packetReceived(self, command, payload2):
        try:
            if command != 'version' and not self.connected2:
                raise PeerMisbehavingError('first message was not version message')
            p2protocol.Protocol.packetReceived(self, command, payload2)
        except PeerMisbehavingError, e:
            print 'Peer %s:%i misbehaving, will drop and ban. Reason:' % self.addr, e.message
            self.badPeerHappened()
    
    def badPeerHappened(self):
        if p2pool.DEBUG:
            print "Bad peer banned:", self.addr
        self.transport.loseConnection()
        self.node.bans[self.transport.getPeer().host] = time.time() + 60*60
    
    def _timeout(self):
        self.timeout_delayed = None
        print 'Connection timed out, disconnecting from %s:%i' % self.addr
        self.transport.loseConnection()
    
    message_version = pack.ComposedType([
        ('version', pack.IntType(32)),
        ('services', pack.IntType(64)),
        ('addr_to', bitcoin_data.address_type),
        ('addr_from', bitcoin_data.address_type),
        ('nonce', pack.IntType(64)),
        ('sub_version', pack.VarStrType()),
        ('mode', pack.IntType(32)), # always 1 for legacy compatibility
        ('best_share_hash', pack.PossiblyNoneType(0, pack.IntType(256))),
    ])
    def handle_version(self, version, services, addr_to, addr_from, nonce, sub_version, mode, best_share_hash):
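        """Complete the handshake: record the peer's version, start the ping
        and addrme loops, and begin mirroring this node's known and mining
        transactions to the peer."""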
        if self.other_version is not None:
            raise PeerMisbehavingError('more than one version message')
        if version < 4:
            raise PeerMisbehavingError('peer too old')
        
        self.other_version = version
        self.other_sub_version = sub_version[:512]
        self.other_services = services
        
        if nonce == self.node.nonce:
            raise PeerMisbehavingError('was connected to self')
        if nonce in self.node.peers:
            if p2pool.DEBUG:
                print 'Detected duplicate connection, disconnecting from %s:%i' % self.addr
            self.transport.loseConnection()
            return
        
        self.nonce = nonce
        self.connected2 = True
        
        self.timeout_delayed.cancel()
        self.timeout_delayed = reactor.callLater(100, self._timeout)
        
        old_dataReceived = self.dataReceived
        def new_dataReceived(data):
            if self.timeout_delayed is not None:
                self.timeout_delayed.reset(100)
            old_dataReceived(data)
        self.dataReceived = new_dataReceived
        
        self.factory.proto_connected(self)
        
        self._stop_thread = deferral.run_repeatedly(lambda: [
            self.send_ping(),
            random.expovariate(1/100),
        ][-1])
        
        self._stop_thread2 = deferral.run_repeatedly(lambda: [
            self.send_addrme(port=self.node.port),
            random.expovariate(1/(100*len(self.node.peers) + 1)),
        ][-1])
        
        if best_share_hash is not None:
            self.node.handle_share_hashes([best_share_hash], self)
        
        def update_remote_view_of_my_known_txs(before, after):
            added = set(after) - set(before)
            removed = set(before) - set(after)
            if added:
                self.send_have_tx(tx_hashes=list(added))
            if removed:
                self.send_losing_tx(tx_hashes=list(removed))
                # XXX cache locally
        watch_id = self.node.known_txs_var.transitioned.watch(update_remote_view_of_my_known_txs)
        self.connection_lost_event.watch(lambda: self.node.known_txs_var.transitioned.unwatch(watch_id))
        
        self.send_have_tx(tx_hashes=self.node.known_txs_var.value.keys())
        
        def update_remote_view_of_my_mining_txs(before, after):
            added = set(after) - set(before)
            removed = set(before) - set(after)
            if added:
                self.send_remember_tx(tx_hashes=[x for x in added if x in self.remote_tx_hashes], txs=[after[x] for x in added if x not in self.remote_tx_hashes])
            if removed:
                self.send_forget_tx(tx_hashes=removed)
        watch_id2 = self.node.mining_txs_var.transitioned.watch(update_remote_view_of_my_mining_txs)
        self.connection_lost_event.watch(lambda: self.node.mining_txs_var.transitioned.unwatch(watch_id2))
        
        self.send_remember_tx(tx_hashes=[], txs=self.node.mining_txs_var.value.values())
    
    message_ping = pack.ComposedType([])
    def handle_ping(self):
        pass
    
    message_addrme = pack.ComposedType([
        ('port', pack.IntType(16)),
    ])
    def handle_addrme(self, port):
        host = self.transport.getPeer().host
        #print 'addrme from', host, port
        if host == '127.0.0.1':
            if random.random() < .8 and self.node.peers:
                random.choice(self.node.peers.values()).send_addrme(port=port) # services...
        else:
            self.node.got_addr((self.transport.getPeer().host, port), self.other_services, int(time.time()))
            if random.random() < .8 and self.node.peers:
                random.choice(self.node.peers.values()).send_addrs(addrs=[
                    dict(
                        address=dict(
                            services=self.other_services,
                            address=host,
                            port=port,
                        ),
                        timestamp=int(time.time()),
                    ),
                ])
    
    message_addrs = pack.ComposedType([
        ('addrs', pack.ListType(pack.ComposedType([
            ('timestamp', pack.IntType(64)),
            ('address', bitcoin_data.address_type),
        ]))),
    ])
    def handle_addrs(self, addrs):
        for addr_record in addrs:
            self.node.got_addr((addr_record['address']['address'], addr_record['address']['port']), addr_record['address']['services'], min(int(time.time()), addr_record['timestamp']))
            if random.random() < .8 and self.node.peers:
                random.choice(self.node.peers.values()).send_addrs(addrs=[addr_record])
    
    message_getaddrs = pack.ComposedType([
        ('count', pack.IntType(32)),
    ])
    def handle_getaddrs(self, count):
        if count > 100:
            count = 100
        self.send_addrs(addrs=[
            dict(
                timestamp=int(self.node.addr_store[host, port][2]),
                address=dict(
                    services=self.node.addr_store[host, port][0],
                    address=host,
                    port=port,
                ),
            ) for host, port in
            self.node.get_good_peers(count)
        ])
    
    message_shares = pack.ComposedType([
        ('shares', pack.ListType(p2pool_data.share_type)),
    ])
    def handle_shares(self, shares):
        self.node.handle_shares([p2pool_data.load_share(share, self.node.net, self) for share in shares if share['type'] not in [6, 7]], self)
    
    def sendShares(self, shares):
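        """Send shares to the peer, recursively splitting the batch in half
        whenever the packed message would be too long."""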
        def att(f, **kwargs):
            try:
                return f(**kwargs)
            except p2protocol.TooLong:
                att(f, **dict((k, v[:len(v)//2]) for k, v in kwargs.iteritems()))
                return att(f, **dict((k, v[len(v)//2:]) for k, v in kwargs.iteritems()))
        if shares:
            return att(self.send_shares, shares=[share.as_share() for share in shares])
        else:
            return defer.succeed(None)
    
    
    message_sharereq = pack.ComposedType([
        ('id', pack.IntType(256)),
        ('hashes', pack.ListType(pack.IntType(256))),
        ('parents', pack.VarIntType()),
        ('stops', pack.ListType(pack.IntType(256))),
    ])
    def handle_sharereq(self, id, hashes, parents, stops):
        shares = self.node.handle_get_shares(hashes, parents, stops, self)
        try:
            self.send_sharereply(id=id, result='good', shares=[share.as_share() for share in shares])
        except p2protocol.TooLong:
            self.send_sharereply(id=id, result='too long', shares=[])
    
    message_sharereply = pack.ComposedType([
        ('id', pack.IntType(256)),
        ('result', pack.EnumType(pack.VarIntType(), {0: 'good', 1: 'too long', 2: 'unk2', 3: 'unk3', 4: 'unk4', 5: 'unk5', 6: 'unk6'})),
        ('shares', pack.ListType(p2pool_data.share_type)),
    ])
    def handle_sharereply(self, id, result, shares):
        if result == 'good':
            res = [p2pool_data.load_share(share, self.node.net, self) for share in shares if share['type'] not in [6, 7]]
        else:
            res = failure.Failure("sharereply result: " + result)
        self.get_shares.got_response(id, res)
    
    
    message_bestblock = pack.ComposedType([
        ('header', bitcoin_data.block_header_type),
    ])
    def handle_bestblock(self, header):
        self.node.handle_bestblock(header, self)
    
    
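    # Transaction pre-forwarding: have_tx/losing_tx advertise which transaction
    # hashes each side currently knows (mirrored here in remote_tx_hashes), while
    # remember_tx/forget_tx push and retire the full transactions behind the
    # sender's mining_txs_var (mirrored here in remembered_txs).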
    message_have_tx = pack.ComposedType([
        ('tx_hashes', pack.ListType(pack.IntType(256))),
    ])
    def handle_have_tx(self, tx_hashes):
        self.remote_tx_hashes.update(tx_hashes)
    message_losing_tx = pack.ComposedType([
        ('tx_hashes', pack.ListType(pack.IntType(256))),
    ])
    def handle_losing_tx(self, tx_hashes):
        self.remote_tx_hashes.difference_update(tx_hashes)
    
    
    message_remember_tx = pack.ComposedType([
        ('tx_hashes', pack.ListType(pack.IntType(256))),
        ('txs', pack.ListType(bitcoin_data.tx_type)),
    ])
    def handle_remember_tx(self, tx_hashes, txs):
        for tx_hash in tx_hashes:
            if tx_hash not in self.remembered_txs:
                self.remembered_txs[tx_hash] = self.node.known_txs_var.value[tx_hash]
        for tx in txs:
            tx_hash = bitcoin_data.hash256(bitcoin_data.tx_type.pack(tx))
            if tx_hash not in self.remembered_txs:
                self.remembered_txs[tx_hash] = tx
    message_forget_tx = pack.ComposedType([
        ('tx_hashes', pack.ListType(pack.IntType(256))),
    ])
    def handle_forget_tx(self, tx_hashes):
        for tx_hash in tx_hashes:
            del self.remembered_txs[tx_hash]
    
    
    def connectionLost(self, reason):
        self.connection_lost_event.happened()
        if self.timeout_delayed is not None:
            self.timeout_delayed.cancel()
        if self.connected2:
            self.factory.proto_disconnected(self, reason)
            self._stop_thread()
            self._stop_thread2()
            self.connected2 = False
        self.factory.proto_lost_connection(self, reason)
        if p2pool.DEBUG:
            print "Peer connection lost:", self.addr, reason
        self.get_shares.respond_all(reason)
    
    @defer.inlineCallbacks
    def do_ping(self):
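        """Measure round-trip time to the peer via a trivial share request."""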
        start = reactor.seconds()
        yield self.get_shares(hashes=[0], parents=0, stops=[])
        end = reactor.seconds()
        defer.returnValue(end - start)

class ServerFactory(protocol.ServerFactory):
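    """Accepts incoming peer connections.
    
    Refuses new connections once max_conns is reached, if more than 3 are
    already open from the same /16, or if the host is currently banned.
    """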
    def __init__(self, node, max_conns):
        self.node = node
        self.max_conns = max_conns
        
        self.conns = {}
        self.running = False
    
    def buildProtocol(self, addr):
        if sum(self.conns.itervalues()) >= self.max_conns or self.conns.get(self._host_to_ident(addr.host), 0) >= 3:
            return None
        if addr.host in self.node.bans and self.node.bans[addr.host] > time.time():
            return None
        p = Protocol(self.node, True)
        p.factory = self
        if p2pool.DEBUG:
            print "Got peer connection from:", addr
        return p
    
    def _host_to_ident(self, host):
        a, b, c, d = host.split('.')
        return a, b
    
    def proto_made_connection(self, proto):
        ident = self._host_to_ident(proto.transport.getPeer().host)
        self.conns[ident] = self.conns.get(ident, 0) + 1
    def proto_lost_connection(self, proto, reason):
        ident = self._host_to_ident(proto.transport.getPeer().host)
        self.conns[ident] -= 1
        if not self.conns[ident]:
            del self.conns[ident]
    
    def proto_connected(self, proto):
        self.node.got_conn(proto)
    def proto_disconnected(self, proto, reason):
        self.node.lost_conn(proto, reason)
    
    def start(self):
        assert not self.running
        self.running = True
        
        def attempt_listen():
            if self.running:
                self.listen_port = reactor.listenTCP(self.node.port, self)
        deferral.retry('Error binding to P2P port:', traceback=False)(attempt_listen)()
    
    def stop(self):
        assert self.running
        self.running = False
        
        return self.listen_port.stopListening()

class ClientFactory(protocol.ClientFactory):
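    """Maintains outgoing peer connections.
    
    Periodically picks addresses from the node's address store until
    desired_conns connections are up, keeping at most max_attempts connection
    attempts in flight and skipping banned or already-attempted hosts.
    """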
    def __init__(self, node, desired_conns, max_attempts):
        self.node = node
        self.desired_conns = desired_conns
        self.max_attempts = max_attempts
        
        self.attempts = set()
        self.conns = set()
        self.running = False
    
    def _host_to_ident(self, host):
        a, b, c, d = host.split('.')
        return a, b
    
    def buildProtocol(self, addr):
        p = Protocol(self.node, False)
        p.factory = self
        return p
    
    def startedConnecting(self, connector):
        ident = self._host_to_ident(connector.getDestination().host)
        if ident in self.attempts:
            raise AssertionError('already have attempt')
        self.attempts.add(ident)
    
    def clientConnectionFailed(self, connector, reason):
        self.attempts.remove(self._host_to_ident(connector.getDestination().host))
    
    def clientConnectionLost(self, connector, reason):
        self.attempts.remove(self._host_to_ident(connector.getDestination().host))
    
    def proto_made_connection(self, proto):
        pass
    def proto_lost_connection(self, proto, reason):
        pass
    
    def proto_connected(self, proto):
        self.conns.add(proto)
        self.node.got_conn(proto)
    def proto_disconnected(self, proto, reason):
        self.conns.remove(proto)
        self.node.lost_conn(proto, reason)
    
    def start(self):
        assert not self.running
        self.running = True
        self._stop_thinking = deferral.run_repeatedly(self._think)
    def stop(self):
        assert self.running
        self.running = False
        self._stop_thinking()
    
    def _think(self):
        try:
            if len(self.conns) < self.desired_conns and len(self.attempts) < self.max_attempts and self.node.addr_store:
                (host, port), = self.node.get_good_peers(1)
                
                if self._host_to_ident(host) in self.attempts:
                    pass
                elif host in self.node.bans and self.node.bans[host] > time.time():
                    pass
                else:
                    #print 'Trying to connect to', host, port
                    reactor.connectTCP(host, port, self, timeout=5)
        except:
            log.err()
        
        return random.expovariate(1/1)

class SingleClientFactory(protocol.ReconnectingClientFactory):
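    """Reconnecting factory for one explicitly configured peer address
    (used by Node.start for each entry in connect_addrs)."""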
    def __init__(self, node):
        self.node = node
    
    def buildProtocol(self, addr):
        p = Protocol(self.node, incoming=False)
        p.factory = self
        return p
    
    def proto_made_connection(self, proto):
        pass
    def proto_lost_connection(self, proto, reason):
        pass
    
    def proto_connected(self, proto):
        self.resetDelay()
        self.node.got_conn(proto)
    def proto_disconnected(self, proto, reason):
        self.node.lost_conn(proto, reason)

class Node(object):
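    """The local p2pool P2P node.
    
    Owns the peer table, address store, and ban list, and drives the client,
    server, and single-client factories. The handle_* methods below are
    print-only stubs, intended to be overridden by the code using this class.
    """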
    def __init__(self, best_share_hash_func, port, net, known_txs_var=variable.Variable({}), mining_txs_var=variable.Variable({}), addr_store={}, connect_addrs=set(), desired_outgoing_conns=10, max_outgoing_attempts=30, max_incoming_conns=50, preferred_storage=1000, traffic_happened=variable.Event()):
        self.best_share_hash_func = best_share_hash_func
        self.port = port
        self.net = net
        self.known_txs_var = known_txs_var
        self.mining_txs_var = mining_txs_var
        self.addr_store = dict(addr_store)
        self.connect_addrs = connect_addrs
        self.preferred_storage = preferred_storage
        self.traffic_happened = traffic_happened
        
        self.nonce = random.randrange(2**64)
        self.peers = {}
        self.bans = {} # address -> end_time
        self.clientfactory = ClientFactory(self, desired_outgoing_conns, max_outgoing_attempts)
        self.serverfactory = ServerFactory(self, max_incoming_conns)
        self.running = False
    
    def start(self):
        if self.running:
            raise ValueError('already running')
        
        self.clientfactory.start()
        self.serverfactory.start()
        self.singleclientconnectors = [reactor.connectTCP(addr, port, SingleClientFactory(self)) for addr, port in self.connect_addrs]
        
        self.running = True
        
        self._stop_thinking = deferral.run_repeatedly(self._think)
    
    def _think(self):
        try:
            if len(self.addr_store) < self.preferred_storage and self.peers:
                random.choice(self.peers.values()).send_getaddrs(count=8)
        except:
            log.err()
        
        return random.expovariate(1/20)
    
    @defer.inlineCallbacks
    def stop(self):
        if not self.running:
            raise ValueError('already stopped')
        
        self.running = False
        
        self._stop_thinking()
        yield self.clientfactory.stop()
        yield self.serverfactory.stop()
        for singleclientconnector in self.singleclientconnectors:
            yield singleclientconnector.factory.stopTrying()
            yield singleclientconnector.disconnect()
        del self.singleclientconnectors
    
    def got_conn(self, conn):
        if conn.nonce in self.peers:
            raise ValueError('already have peer')
        self.peers[conn.nonce] = conn
        
        print '%s connection to peer %s:%i established. p2pool version: %i %r' % ('Incoming' if conn.incoming else 'Outgoing', conn.addr[0], conn.addr[1], conn.other_version, conn.other_sub_version)
    
    def lost_conn(self, conn, reason):
        if conn.nonce not in self.peers:
            raise ValueError('''don't have peer''')
        if conn is not self.peers[conn.nonce]:
            raise ValueError('wrong conn')
        del self.peers[conn.nonce]
        
        print 'Lost peer %s:%i - %s' % (conn.addr[0], conn.addr[1], reason.getErrorMessage())
    
    
    def got_addr(self, (host, port), services, timestamp):
        if (host, port) in self.addr_store:
            old_services, old_first_seen, old_last_seen = self.addr_store[host, port]
            self.addr_store[host, port] = services, old_first_seen, max(old_last_seen, timestamp)
        else:
            self.addr_store[host, port] = services, timestamp, timestamp
    
    def handle_shares(self, shares, peer):
        print 'handle_shares', (shares, peer)
    
    def handle_share_hashes(self, hashes, peer):
        print 'handle_share_hashes', (hashes, peer)
    
    def handle_get_shares(self, hashes, parents, stops, peer):
        print 'handle_get_shares', (hashes, parents, stops, peer)
    
    def handle_bestblock(self, header, peer):
        print 'handle_bestblock', header
    
    def get_good_peers(self, max_count):
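        # Prefer addresses that have stayed reachable for a long span and were
        # seen recently, with random jitter to spread the selection.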
        t = time.time()
        return [x[0] for x in sorted(self.addr_store.iteritems(), key=lambda (k, (services, first_seen, last_seen)):
            -math.log(max(3600, last_seen - first_seen))/math.log(max(3600, t - last_seen))*random.expovariate(1)
        )][:max_count]
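# Minimal usage sketch (not part of this module; a hedged illustration only).
# Node ties the three factories together: construct it with a callback that
# returns the current best share hash, a port to listen on, and a network
# definition providing PREFIX, then call start(). Roughly:
#
#     node = Node(best_share_hash_func=lambda: None, port=9333, net=some_net,
#         addr_store={}, connect_addrs=set())
#     node.start()
#     reactor.run()
#
# Here some_net is a placeholder for whatever network definition object the
# caller has; shares and requests arrive through the handle_* methods above.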