from __future__ import division
-import ConfigParser
-import StringIO
-import argparse
import base64
+import gc
import json
import os
import random
if '--iocp' in sys.argv:
from twisted.internet import iocpreactor
iocpreactor.install()
-from twisted.internet import defer, reactor, protocol, task
+from twisted.internet import defer, reactor, protocol, tcp
from twisted.web import server
from twisted.python import log
from nattraverso import portmapper, ipdiscover
-import bitcoin.p2p as bitcoin_p2p, bitcoin.getwork as bitcoin_getwork, bitcoin.data as bitcoin_data
-from bitcoin import worker_interface, height_tracker
-from util import expiring_dict, jsonrpc, variable, deferral, math, logging, pack
-from . import p2p, networks, web
-import p2pool, p2pool.data as p2pool_data
-
-@deferral.retry('Error getting work from bitcoind:', 3)
-@defer.inlineCallbacks
-def getwork(bitcoind):
- try:
- work = yield bitcoind.rpc_getmemorypool()
- except jsonrpc.Error, e:
- if e.code == -32601: # Method not found
- print >>sys.stderr, 'Error: Bitcoin version too old! Upgrade to v0.5 or newer!'
- raise deferral.RetrySilentlyException()
- raise
- packed_transactions = [x.decode('hex') for x in work['transactions']]
- defer.returnValue(dict(
- version=work['version'],
- previous_block_hash=int(work['previousblockhash'], 16),
- transactions=map(bitcoin_data.tx_type.unpack, packed_transactions),
- merkle_link=bitcoin_data.calculate_merkle_link([0] + map(bitcoin_data.hash256, packed_transactions), 0), # using 0 is a bit of a hack, but will always work when index=0
- subsidy=work['coinbasevalue'],
- time=work['time'],
- bits=bitcoin_data.FloatingIntegerType().unpack(work['bits'].decode('hex')[::-1]) if isinstance(work['bits'], (str, unicode)) else bitcoin_data.FloatingInteger(work['bits']),
- coinbaseflags=work['coinbaseflags'].decode('hex') if 'coinbaseflags' in work else ''.join(x.decode('hex') for x in work['coinbaseaux'].itervalues()) if 'coinbaseaux' in work else '',
- ))
+import bitcoin.p2p as bitcoin_p2p, bitcoin.data as bitcoin_data
+from bitcoin import stratum, worker_interface, helper
+from util import fixargparse, jsonrpc, variable, deferral, math, logging, switchprotocol
+from . import networks, web, work
+import p2pool, p2pool.data as p2pool_data, p2pool.node as p2pool_node
@defer.inlineCallbacks
def main(args, net, datadir_path, merged_urls, worker_endpoint):
try:
print 'p2pool (version %s)' % (p2pool.__version__,)
print
+
+ @defer.inlineCallbacks
+ def connect_p2p():
+ # connect to bitcoind over bitcoin-p2p
+ print '''Testing bitcoind P2P connection to '%s:%s'...''' % (args.bitcoind_address, args.bitcoind_p2p_port)
+ factory = bitcoin_p2p.ClientFactory(net.PARENT)
+ reactor.connectTCP(args.bitcoind_address, args.bitcoind_p2p_port, factory)
+ def warn_slow(): # named to avoid shadowing the builtin 'long'
+ print ''' ...taking a while. Common reasons for this include all of bitcoind's connection slots being used...'''
+ warn_dc = reactor.callLater(5, warn_slow)
+ yield factory.getProtocol() # waits until handshake is successful
+ if not warn_dc.called: warn_dc.cancel()
+ print ' ...success!'
+ print
+ defer.returnValue(factory)
+
+ if args.testnet: # on testnet, connect over P2P first: p2pool's own connection lets an otherwise peerless bitcoind serve work
+ factory = yield connect_p2p()
# connect to bitcoind over JSON-RPC and do initial getmemorypool
- url = 'http://%s:%i/' % (args.bitcoind_address, args.bitcoind_rpc_port)
+ url = '%s://%s:%i/' % ('https' if args.bitcoind_rpc_ssl else 'http', args.bitcoind_address, args.bitcoind_rpc_port)
print '''Testing bitcoind RPC connection to '%s' with username '%s'...''' % (url, args.bitcoind_rpc_username)
- bitcoind = jsonrpc.Proxy(url, dict(Authorization='Basic ' + base64.b64encode(args.bitcoind_rpc_username + ':' + args.bitcoind_rpc_password)), timeout=30)
- @deferral.retry('Error while checking Bitcoin connection:', 1)
+ bitcoind = jsonrpc.HTTPProxy(url, dict(Authorization='Basic ' + base64.b64encode(args.bitcoind_rpc_username + ':' + args.bitcoind_rpc_password)), timeout=30)
+ yield helper.check(bitcoind, net)
+ temp_work = yield helper.getwork(bitcoind)
+
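+ # holds bitcoind's latest getinfo result; refreshed every 20 minutes below and read by the web root and the warnings code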
+ bitcoind_getinfo_var = variable.Variable(None)
@defer.inlineCallbacks
- def check():
- if not (yield net.PARENT.RPC_CHECK)(bitcoind):
- print >>sys.stderr, " Check failed! Make sure that you're connected to the right bitcoind with --bitcoind-rpc-port!"
- raise deferral.RetrySilentlyException()
- temp_work = yield getwork(bitcoind)
- if not net.VERSION_CHECK((yield bitcoind.rpc_getinfo())['version'], temp_work):
- print >>sys.stderr, ' Bitcoin version too old! BIP16 support required! Upgrade to 0.6.0rc4 or greater!'
- raise deferral.RetrySilentlyException()
- defer.returnValue(temp_work)
- temp_work = yield check()
- print ' ...success!'
- print ' Current block hash: %x' % (temp_work['previous_block_hash'],)
- print
+ def poll_warnings():
+ bitcoind_getinfo_var.set((yield deferral.retry('Error while calling getinfo:')(bitcoind.rpc_getinfo)()))
+ yield poll_warnings()
+ deferral.RobustLoopingCall(poll_warnings).start(20*60)
- # connect to bitcoind over bitcoin-p2p
- print '''Testing bitcoind P2P connection to '%s:%s'...''' % (args.bitcoind_address, args.bitcoind_p2p_port)
- factory = bitcoin_p2p.ClientFactory(net.PARENT)
- reactor.connectTCP(args.bitcoind_address, args.bitcoind_p2p_port, factory)
- yield factory.getProtocol() # waits until handshake is successful
print ' ...success!'
+ print ' Current block hash: %x' % (temp_work['previous_block'],)
+ print ' Current block height: %i' % (temp_work['height'] - 1,)
print
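+ # on mainnet, connect over P2P only after the RPC checks pass (testnet connected earlier, above)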
+ if not args.testnet:
+ factory = yield connect_p2p()
+
print 'Determining payout address...'
if args.pubkey_hash is None:
address_path = os.path.join(datadir_path, 'cached_payout_address')
print ' ...success! Payout address:', bitcoin_data.pubkey_hash_to_address(my_pubkey_hash, net.PARENT)
print
- my_share_hashes = set()
- my_doa_share_hashes = set()
-
- tracker = p2pool_data.OkayTracker(net, my_share_hashes, my_doa_share_hashes)
- shared_share_hashes = set()
- ss = p2pool_data.ShareStore(os.path.join(datadir_path, 'shares.'), net)
- known_verified = set()
print "Loading shares..."
- for i, (mode, contents) in enumerate(ss.get_shares()):
- if mode == 'share':
- if contents.hash in tracker.shares:
- continue
- shared_share_hashes.add(contents.hash)
- contents.time_seen = 0
- tracker.add(contents)
- if len(tracker.shares) % 1000 == 0 and tracker.shares:
- print " %i" % (len(tracker.shares),)
- elif mode == 'verified_hash':
- known_verified.add(contents)
- else:
- raise AssertionError()
- print " ...inserting %i verified shares..." % (len(known_verified),)
- for h in known_verified:
- if h not in tracker.shares:
- ss.forget_verified_share(h)
- continue
- tracker.verified.add(tracker.shares[h])
- print " ...done loading %i shares!" % (len(tracker.shares),)
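+ # ShareStore now streams shares from disk through callbacks instead of returning a list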
+ shares = {}
+ known_verified = set()
+ def share_cb(share):
+ share.time_seen = 0 # loaded from disk, so there is no meaningful receive time; don't let these skew timing stats
+ shares[share.hash] = share
+ if len(shares) % 1000 == 0 and shares:
+ print " %i" % (len(shares),)
+ ss = p2pool_data.ShareStore(os.path.join(datadir_path, 'shares.'), net, share_cb, known_verified.add)
+ print " ...done loading %i shares (%i verified)!" % (len(shares), len(known_verified))
print
- tracker.removed.watch(lambda share: ss.forget_share(share.hash))
- tracker.verified.removed.watch(lambda share: ss.forget_verified_share(share.hash))
- tracker.removed.watch(lambda share: shared_share_hashes.discard(share.hash))
- peer_heads = expiring_dict.ExpiringDict(300) # hash -> peers that know of it
- pre_current_work = variable.Variable(None)
- pre_merged_work = variable.Variable({})
- # information affecting work that should trigger a long-polling update
- current_work = variable.Variable(None)
- # information affecting work that should not trigger a long-polling update
- current_work2 = variable.Variable(None)
+ print 'Initializing work...'
- requested = expiring_dict.ExpiringDict(300)
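+ # the tracker, bitcoind work state and share-chain logic now live in p2pool_node.Node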
+ node = p2pool_node.Node(factory, bitcoind, shares.values(), known_verified, net)
+ yield node.start()
- print 'Initializing work...'
- @defer.inlineCallbacks
- def set_real_work1():
- work = yield getwork(bitcoind)
- current_work2.set(dict(
- time=work['time'],
- transactions=work['transactions'],
- merkle_link=work['merkle_link'],
- subsidy=work['subsidy'],
- clock_offset=time.time() - work['time'],
- last_update=time.time(),
- )) # second set first because everything hooks on the first
- pre_current_work.set(dict(
- version=work['version'],
- previous_block=work['previous_block_hash'],
- bits=work['bits'],
- coinbaseflags=work['coinbaseflags'],
- ))
- yield set_real_work1()
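+ # forget on-disk shares and verified hashes that the node didn't accept into its tracker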
+ for share_hash in shares:
+ if share_hash not in node.tracker.items:
+ ss.forget_share(share_hash)
+ for share_hash in known_verified:
+ if share_hash not in node.tracker.verified.items:
+ ss.forget_verified_share(share_hash)
+ node.tracker.removed.watch(lambda share: ss.forget_share(share.hash))
+ node.tracker.verified.removed.watch(lambda share: ss.forget_verified_share(share.hash))
- get_height_rel_highest = yield height_tracker.get_height_rel_highest_func(bitcoind, factory, lambda: pre_current_work.value['previous_block'], net)
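+ # once a minute, persist the best chain (up to 2*net.CHAIN_LENGTH shares) back to the ShareStore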
+ def save_shares():
+ for share in node.tracker.get_chain(node.best_share_var.value, min(node.tracker.get_height(node.best_share_var.value), 2*net.CHAIN_LENGTH)):
+ ss.add_share(share)
+ if share.hash in node.tracker.verified.items:
+ ss.add_verified_hash(share.hash)
+ deferral.RobustLoopingCall(save_shares).start(60)
- def set_real_work2():
- best, desired = tracker.think(get_height_rel_highest, pre_current_work.value['previous_block'], pre_current_work.value['bits'])
-
- t = dict(pre_current_work.value)
- t['best_share_hash'] = best
- t['mm_chains'] = pre_merged_work.value
- current_work.set(t)
-
- t = time.time()
- for peer2, share_hash in desired:
- if share_hash not in tracker.tails: # was received in the time tracker.think was running
- continue
- last_request_time, count = requested.get(share_hash, (None, 0))
- if last_request_time is not None and last_request_time - 5 < t < last_request_time + 10 * 1.5**count:
- continue
- potential_peers = set()
- for head in tracker.tails[share_hash]:
- potential_peers.update(peer_heads.get(head, set()))
- potential_peers = [peer for peer in potential_peers if peer.connected2]
- if count == 0 and peer2 is not None and peer2.connected2:
- peer = peer2
- else:
- peer = random.choice(potential_peers) if potential_peers and random.random() > .2 else peer2
- if peer is None:
- continue
-
- print 'Requesting parent share %s from %s' % (p2pool_data.format_hash(share_hash), '%s:%i' % peer.addr)
- peer.send_getshares(
- hashes=[share_hash],
- parents=2000,
- stops=list(set(tracker.heads) | set(
- tracker.get_nth_parent_hash(head, min(max(0, tracker.get_height_and_last(head)[0] - 1), 10)) for head in tracker.heads
- ))[:100],
- )
- requested[share_hash] = t, count + 1
- pre_current_work.changed.watch(lambda _: set_real_work2())
- pre_merged_work.changed.watch(lambda _: set_real_work2())
- set_real_work2()
print ' ...success!'
print
- @defer.inlineCallbacks
- def set_merged_work(merged_url, merged_userpass):
- merged_proxy = jsonrpc.Proxy(merged_url, dict(Authorization='Basic ' + base64.b64encode(merged_userpass)))
- while True:
- auxblock = yield deferral.retry('Error while calling merged getauxblock:', 1)(merged_proxy.rpc_getauxblock)()
- pre_merged_work.set(dict(pre_merged_work.value, **{auxblock['chainid']: dict(
- hash=int(auxblock['hash'], 16),
- target=pack.IntType(256).unpack(auxblock['target'].decode('hex')),
- merged_proxy=merged_proxy,
- )}))
- yield deferral.sleep(1)
- for merged_url, merged_userpass in merged_urls:
- set_merged_work(merged_url, merged_userpass)
-
- @pre_merged_work.changed.watch
- def _(new_merged_work):
- print 'Got new merged mining work!'
-
- # setup p2p logic and join p2pool network
-
- class Node(p2p.Node):
- def handle_shares(self, shares, peer):
- if len(shares) > 5:
- print 'Processing %i shares from %s...' % (len(shares), '%s:%i' % peer.addr if peer is not None else None)
-
- new_count = 0
- for share in shares:
- if share.hash in tracker.shares:
- #print 'Got duplicate share, ignoring. Hash: %s' % (p2pool_data.format_hash(share.hash),)
- continue
-
- new_count += 1
-
- #print 'Received share %s from %r' % (p2pool_data.format_hash(share.hash), share.peer.addr if share.peer is not None else None)
-
- tracker.add(share)
-
- if shares and peer is not None:
- peer_heads.setdefault(shares[0].hash, set()).add(peer)
-
- if new_count:
- set_real_work2()
-
- if len(shares) > 5:
- print '... done processing %i shares. New: %i Have: %i/~%i' % (len(shares), new_count, len(tracker.shares), 2*net.CHAIN_LENGTH)
-
- def handle_share_hashes(self, hashes, peer):
- t = time.time()
- get_hashes = []
- for share_hash in hashes:
- if share_hash in tracker.shares:
- continue
- last_request_time, count = requested.get(share_hash, (None, 0))
- if last_request_time is not None and last_request_time - 5 < t < last_request_time + 10 * 1.5**count:
- continue
- print 'Got share hash, requesting! Hash: %s' % (p2pool_data.format_hash(share_hash),)
- get_hashes.append(share_hash)
- requested[share_hash] = t, count + 1
-
- if hashes and peer is not None:
- peer_heads.setdefault(hashes[0], set()).add(peer)
- if get_hashes:
- peer.send_getshares(hashes=get_hashes, parents=0, stops=[])
-
- def handle_get_shares(self, hashes, parents, stops, peer):
- parents = min(parents, 1000//len(hashes))
- stops = set(stops)
- shares = []
- for share_hash in hashes:
- for share in tracker.get_chain(share_hash, min(parents + 1, tracker.get_height(share_hash))):
- if share.hash in stops:
- break
- shares.append(share)
- print 'Sending %i shares to %s:%i' % (len(shares), peer.addr[0], peer.addr[1])
- return shares
-
- @deferral.retry('Error submitting primary block: (will retry)', 10, 10)
- def submit_block_p2p(block):
- if factory.conn.value is None:
- print >>sys.stderr, 'No bitcoind connection when block submittal attempted! %s%32x' % (net.PARENT.BLOCK_EXPLORER_URL_PREFIX, bitcoin_data.hash256(bitcoin_data.block_header_type.pack(block['header'])))
- raise deferral.RetrySilentlyException()
- factory.conn.value.send_block(block=block)
-
- @deferral.retry('Error submitting block: (will retry)', 10, 10)
- @defer.inlineCallbacks
- def submit_block_rpc(block, ignore_failure):
- success = yield bitcoind.rpc_getmemorypool(bitcoin_data.block_type.pack(block).encode('hex'))
- success_expected = net.PARENT.POW_FUNC(bitcoin_data.block_header_type.pack(block['header'])) <= block['header']['bits'].target
- if (not success and success_expected and not ignore_failure) or (success and not success_expected):
- print >>sys.stderr, 'Block submittal result: %s Expected: %s' % (success, success_expected)
-
- def submit_block(block, ignore_failure):
- submit_block_p2p(block)
- submit_block_rpc(block, ignore_failure)
-
- @tracker.verified.added.watch
- def _(share):
- if share.pow_hash <= share.header['bits'].target:
- submit_block(share.as_block(tracker), ignore_failure=True)
- print
- print 'GOT BLOCK FROM PEER! Passing to bitcoind! %s bitcoin: %s%064x' % (p2pool_data.format_hash(share.hash), net.PARENT.BLOCK_EXPLORER_URL_PREFIX, share.header_hash)
- print
- def spread():
- if (get_height_rel_highest(share.header['previous_block']) > -5 or
- current_work.value['previous_block'] in [share.header['previous_block'], share.header_hash]):
- broadcast_share(share.hash)
- spread()
- reactor.callLater(5, spread) # so get_height_rel_highest can update
-
print 'Joining p2pool network using port %i...' % (args.p2pool_port,)
@defer.inlineCallbacks
- def parse(x):
- if ':' in x:
- ip, port = x.split(':')
- defer.returnValue(((yield reactor.resolve(ip)), int(port)))
- else:
- defer.returnValue(((yield reactor.resolve(x)), net.P2P_PORT))
+ def parse(host):
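+ # accept 'host' or 'host:port', defaulting to the network's P2P port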
+ port = net.P2P_PORT
+ if ':' in host:
+ host, port_str = host.split(':')
+ port = int(port_str)
+ defer.returnValue(((yield reactor.resolve(host)), port))
addrs = {}
if os.path.exists(os.path.join(datadir_path, 'addrs')):
addrs.update(dict((tuple(k), v) for k, v in json.loads(f.read())))
except:
print >>sys.stderr, 'error parsing addrs'
- elif os.path.exists(os.path.join(datadir_path, 'addrs.txt')):
- try:
- addrs.update(dict(eval(x) for x in open(os.path.join(datadir_path, 'addrs.txt'))))
- except:
- print >>sys.stderr, "error reading addrs.txt"
for addr_df in map(parse, net.BOOTSTRAP_ADDRS):
try:
addr = yield addr_df
except:
log.err()
- p2p_node = Node(
- best_share_hash_func=lambda: current_work.value['best_share_hash'],
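+ # the P2P node now hangs off the Node object; outgoing connection count and IP advertising are configurable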
+ node.p2p_node = p2pool_node.P2PNode(node,
port=args.p2pool_port,
- net=net,
+ max_incoming_conns=args.p2pool_conns,
addr_store=addrs,
connect_addrs=connect_addrs,
- max_incoming_conns=args.p2pool_conns,
+ desired_outgoing_conns=args.p2pool_outgoing_conns,
+ advertise_ip=args.advertise_ip,
)
- p2p_node.start()
+ node.p2p_node.start()
def save_addrs():
with open(os.path.join(datadir_path, 'addrs'), 'wb') as f:
- f.write(json.dumps(p2p_node.addr_store.items()))
- task.LoopingCall(save_addrs).start(60)
-
- def broadcast_share(share_hash):
- shares = []
- for share in tracker.get_chain(share_hash, min(5, tracker.get_height(share_hash))):
- if share.hash in shared_share_hashes:
- break
- shared_share_hashes.add(share.hash)
- shares.append(share)
-
- for peer in p2p_node.peers.itervalues():
- peer.sendShares([share for share in shares if share.peer is not peer])
-
- # send share when the chain changes to their chain
- current_work.changed.watch(lambda new_work: broadcast_share(new_work['best_share_hash']))
-
- def save_shares():
- for share in tracker.get_chain(current_work.value['best_share_hash'], min(tracker.get_height(current_work.value['best_share_hash']), 2*net.CHAIN_LENGTH)):
- ss.add_share(share)
- if share.hash in tracker.verified.shares:
- ss.add_verified_hash(share.hash)
- task.LoopingCall(save_shares).start(60)
+ f.write(json.dumps(node.p2p_node.addr_store.items()))
+ deferral.RobustLoopingCall(save_addrs).start(60)
print ' ...success!'
print
print 'Listening for workers on %r port %i...' % (worker_endpoint[0], worker_endpoint[1])
- # setup worker logic
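+ # worker logic now lives in work.WorkerBridge; CachingWorkerBridge caches generated work, and the same web root serves miners and the stats pages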
+ wb = work.WorkerBridge(node, my_pubkey_hash, args.donation_percentage, merged_urls, args.worker_fee)
+ web_root = web.get_web_root(wb, datadir_path, bitcoind_getinfo_var)
+ caching_wb = worker_interface.CachingWorkerBridge(wb)
+ worker_interface.WorkerInterface(caching_wb).attach_to(web_root, get_handler=lambda request: request.redirect('static/'))
+ web_serverfactory = server.Site(web_root)
- removed_unstales_var = variable.Variable((0, 0, 0))
- removed_doa_unstales_var = variable.Variable(0)
- @tracker.verified.removed.watch
- def _(share):
- if share.hash in my_share_hashes and tracker.is_child_of(share.hash, current_work.value['best_share_hash']):
- assert share.share_data['stale_info'] in [None, 'orphan', 'doa'] # we made these shares in this instance
- removed_unstales_var.set((
- removed_unstales_var.value[0] + 1,
- removed_unstales_var.value[1] + (1 if share.share_data['stale_info'] == 'orphan' else 0),
- removed_unstales_var.value[2] + (1 if share.share_data['stale_info'] == 'doa' else 0),
- ))
- if share.hash in my_doa_share_hashes and tracker.is_child_of(share.hash, current_work.value['best_share_hash']):
- removed_doa_unstales_var.set(removed_doa_unstales_var.value + 1)
- def get_stale_counts():
- '''Returns (orphans, doas), total, (orphans_recorded_in_chain, doas_recorded_in_chain)'''
- my_shares = len(my_share_hashes)
- my_doa_shares = len(my_doa_share_hashes)
- delta = tracker.verified.get_delta_to_last(current_work.value['best_share_hash'])
- my_shares_in_chain = delta.my_count + removed_unstales_var.value[0]
- my_doa_shares_in_chain = delta.my_doa_count + removed_doa_unstales_var.value
- orphans_recorded_in_chain = delta.my_orphan_announce_count + removed_unstales_var.value[1]
- doas_recorded_in_chain = delta.my_dead_announce_count + removed_unstales_var.value[2]
-
- my_shares_not_in_chain = my_shares - my_shares_in_chain
- my_doa_shares_not_in_chain = my_doa_shares - my_doa_shares_in_chain
-
- return (my_shares_not_in_chain - my_doa_shares_not_in_chain, my_doa_shares_not_in_chain), my_shares, (orphans_recorded_in_chain, doas_recorded_in_chain)
-
-
- pseudoshare_received = variable.Event()
- share_received = variable.Event()
- local_rate_monitor = math.RateMonitor(10*60)
-
- class WorkerBridge(worker_interface.WorkerBridge):
- def __init__(self):
- worker_interface.WorkerBridge.__init__(self)
- self.new_work_event = current_work.changed
- self.recent_shares_ts_work = []
-
- def get_user_details(self, request):
- user = request.getUser() if request.getUser() is not None else ''
-
- desired_pseudoshare_target = None
- if '+' in user:
- user, desired_pseudoshare_difficulty_str = user.rsplit('+', 1)
- try:
- desired_pseudoshare_target = bitcoin_data.difficulty_to_target(float(desired_pseudoshare_difficulty_str))
- except:
- pass
-
- desired_share_target = 2**256 - 1
- if '/' in user:
- user, min_diff_str = user.rsplit('/', 1)
- try:
- desired_share_target = bitcoin_data.difficulty_to_target(float(min_diff_str))
- except:
- pass
-
- if random.uniform(0, 100) < args.worker_fee:
- pubkey_hash = my_pubkey_hash
- else:
- try:
- pubkey_hash = bitcoin_data.address_to_pubkey_hash(user, net.PARENT)
- except: # XXX blah
- pubkey_hash = my_pubkey_hash
-
- return user, pubkey_hash, desired_share_target, desired_pseudoshare_target
-
- def preprocess_request(self, request):
- user, pubkey_hash, desired_share_target, desired_pseudoshare_target = self.get_user_details(request)
- return pubkey_hash, desired_share_target, desired_pseudoshare_target
-
- def get_work(self, pubkey_hash, desired_share_target, desired_pseudoshare_target):
- if len(p2p_node.peers) == 0 and net.PERSIST:
- raise jsonrpc.Error(-12345, u'p2pool is not connected to any peers')
- if current_work.value['best_share_hash'] is None and net.PERSIST:
- raise jsonrpc.Error(-12345, u'p2pool is downloading shares')
- if time.time() > current_work2.value['last_update'] + 60:
- raise jsonrpc.Error(-12345, u'lost contact with bitcoind')
-
- if current_work.value['mm_chains']:
- tree, size = bitcoin_data.make_auxpow_tree(current_work.value['mm_chains'])
- mm_hashes = [current_work.value['mm_chains'].get(tree.get(i), dict(hash=0))['hash'] for i in xrange(size)]
- mm_data = '\xfa\xbemm' + bitcoin_data.aux_pow_coinbase_type.pack(dict(
- merkle_root=bitcoin_data.merkle_hash(mm_hashes),
- size=size,
- nonce=0,
- ))
- mm_later = [(aux_work, mm_hashes.index(aux_work['hash']), mm_hashes) for chain_id, aux_work in current_work.value['mm_chains'].iteritems()]
- else:
- mm_data = ''
- mm_later = []
-
- share_type = p2pool_data.NewShare
- if current_work.value['best_share_hash'] is not None:
- previous_share = tracker.shares[current_work.value['best_share_hash']]
- if isinstance(previous_share, p2pool_data.Share):
- # Share -> NewShare only valid if 85% of hashes in [net.CHAIN_LENGTH*9//10, net.CHAIN_LENGTH] for new version
- if tracker.get_height(previous_share.hash) < net.CHAIN_LENGTH:
- share_type = p2pool_data.Share
- else:
- counts = p2pool_data.get_desired_version_counts(tracker,
- tracker.get_nth_parent_hash(previous_share.hash, net.CHAIN_LENGTH*9//10), net.CHAIN_LENGTH//10)
- if counts.get(2, 0) < sum(counts.itervalues())*95//100:
- share_type = p2pool_data.Share
-
- if True:
- share_info, generate_tx = share_type.generate_transaction(
- tracker=tracker,
- share_data=dict(
- previous_share_hash=current_work.value['best_share_hash'],
- coinbase=(mm_data + current_work.value['coinbaseflags'])[:100],
- nonce=random.randrange(2**32),
- pubkey_hash=pubkey_hash,
- subsidy=current_work2.value['subsidy'],
- donation=math.perfect_round(65535*args.donation_percentage/100),
- stale_info=(lambda (orphans, doas), total, (orphans_recorded_in_chain, doas_recorded_in_chain):
- 'orphan' if orphans > orphans_recorded_in_chain else
- 'doa' if doas > doas_recorded_in_chain else
- None
- )(*get_stale_counts()),
- desired_version=2,
- ),
- block_target=current_work.value['bits'].target,
- desired_timestamp=int(time.time() - current_work2.value['clock_offset']),
- desired_target=desired_share_target,
- ref_merkle_link=dict(branch=[], index=0),
- net=net,
- )
-
- if desired_pseudoshare_target is None:
- target = 2**256-1
- if len(self.recent_shares_ts_work) == 50:
- hash_rate = sum(work for ts, work in self.recent_shares_ts_work[1:])//(self.recent_shares_ts_work[-1][0] - self.recent_shares_ts_work[0][0])
- if hash_rate:
- target = min(target, int(2**256/hash_rate))
- else:
- target = desired_pseudoshare_target
- target = max(target, share_info['bits'].target)
- for aux_work in current_work.value['mm_chains'].itervalues():
- target = max(target, aux_work['target'])
- target = math.clip(target, net.PARENT.SANE_TARGET_RANGE)
-
- transactions = [generate_tx] + list(current_work2.value['transactions'])
- packed_generate_tx = bitcoin_data.tx_type.pack(generate_tx)
- merkle_root = bitcoin_data.check_merkle_link(bitcoin_data.hash256(packed_generate_tx), current_work2.value['merkle_link'])
-
- getwork_time = time.time()
- merkle_link = current_work2.value['merkle_link']
-
- print 'New work for worker! Difficulty: %.06f Share difficulty: %.06f Total block value: %.6f %s including %i transactions' % (
- bitcoin_data.target_to_difficulty(target),
- bitcoin_data.target_to_difficulty(share_info['bits'].target),
- current_work2.value['subsidy']*1e-8, net.PARENT.SYMBOL,
- len(current_work2.value['transactions']),
- )
-
- bits = current_work.value['bits']
- previous_block = current_work.value['previous_block']
- ba = bitcoin_getwork.BlockAttempt(
- version=current_work.value['version'],
- previous_block=current_work.value['previous_block'],
- merkle_root=merkle_root,
- timestamp=current_work2.value['time'],
- bits=current_work.value['bits'],
- share_target=target,
- )
-
- received_header_hashes = set()
-
- def got_response(header, request):
- header_hash = bitcoin_data.hash256(bitcoin_data.block_header_type.pack(header))
- pow_hash = net.PARENT.POW_FUNC(bitcoin_data.block_header_type.pack(header))
- try:
- if pow_hash <= header['bits'].target or p2pool.DEBUG:
- submit_block(dict(header=header, txs=transactions), ignore_failure=False)
- if pow_hash <= header['bits'].target:
- print
- print 'GOT BLOCK FROM MINER! Passing to bitcoind! %s%064x' % (net.PARENT.BLOCK_EXPLORER_URL_PREFIX, header_hash)
- print
- except:
- log.err(None, 'Error while processing potential block:')
-
- user, _, _, _ = self.get_user_details(request)
- assert header['merkle_root'] == merkle_root
- assert header['previous_block'] == previous_block
- assert header['bits'] == bits
-
- on_time = current_work.value['best_share_hash'] == share_info['share_data']['previous_share_hash']
-
- for aux_work, index, hashes in mm_later:
- try:
- if pow_hash <= aux_work['target'] or p2pool.DEBUG:
- df = deferral.retry('Error submitting merged block: (will retry)', 10, 10)(aux_work['merged_proxy'].rpc_getauxblock)(
- pack.IntType(256, 'big').pack(aux_work['hash']).encode('hex'),
- bitcoin_data.aux_pow_type.pack(dict(
- merkle_tx=dict(
- tx=transactions[0],
- block_hash=header_hash,
- merkle_link=merkle_link,
- ),
- merkle_link=bitcoin_data.calculate_merkle_link(hashes, index),
- parent_block_header=header,
- )).encode('hex'),
- )
- @df.addCallback
- def _(result):
- if result != (pow_hash <= aux_work['target']):
- print >>sys.stderr, 'Merged block submittal result: %s Expected: %s' % (result, pow_hash <= aux_work['target'])
- else:
- print 'Merged block submittal result: %s' % (result,)
- @df.addErrback
- def _(err):
- log.err(err, 'Error submitting merged block:')
- except:
- log.err(None, 'Error while processing merged mining POW:')
-
- if pow_hash <= share_info['bits'].target and header_hash not in received_header_hashes:
- min_header = dict(header);del min_header['merkle_root']
- hash_link = p2pool_data.prefix_to_hash_link(packed_generate_tx[:-32-4], share_type.gentx_before_refhash)
- share = share_type(net, None, dict(
- min_header=min_header, share_info=share_info, hash_link=hash_link,
- ref_merkle_link=dict(branch=[], index=0),
- ), merkle_link=merkle_link, other_txs=transactions[1:] if pow_hash <= header['bits'].target else None)
-
- print 'GOT SHARE! %s %s prev %s age %.2fs%s' % (
- request.getUser(),
- p2pool_data.format_hash(share.hash),
- p2pool_data.format_hash(share.previous_hash),
- time.time() - getwork_time,
- ' DEAD ON ARRIVAL' if not on_time else '',
- )
- my_share_hashes.add(share.hash)
- if not on_time:
- my_doa_share_hashes.add(share.hash)
-
- tracker.add(share)
- if not p2pool.DEBUG:
- tracker.verified.add(share)
- set_real_work2()
-
- try:
- if pow_hash <= header['bits'].target or p2pool.DEBUG:
- for peer in p2p_node.peers.itervalues():
- peer.sendShares([share])
- shared_share_hashes.add(share.hash)
- except:
- log.err(None, 'Error forwarding block solution:')
-
- share_received.happened(bitcoin_data.target_to_average_attempts(share.target), not on_time)
-
- if pow_hash > target:
- print 'Worker %s submitted share with hash > target:' % (request.getUser(),)
- print ' Hash: %56x' % (pow_hash,)
- print ' Target: %56x' % (target,)
- elif header_hash in received_header_hashes:
- print >>sys.stderr, 'Worker %s @ %s submitted share more than once!' % (request.getUser(), request.getClientIP())
- else:
- received_header_hashes.add(header_hash)
-
- pseudoshare_received.happened(bitcoin_data.target_to_average_attempts(target), not on_time, user)
- self.recent_shares_ts_work.append((time.time(), bitcoin_data.target_to_average_attempts(target)))
- while len(self.recent_shares_ts_work) > 50:
- self.recent_shares_ts_work.pop(0)
- local_rate_monitor.add_datum(dict(work=bitcoin_data.target_to_average_attempts(target), dead=not on_time, user=user))
-
- return on_time
-
- return ba, got_response
-
- get_current_txouts = lambda: p2pool_data.get_expected_payouts(tracker, current_work.value['best_share_hash'], current_work.value['bits'].target, current_work2.value['subsidy'], net)
-
- web_root = web.get_web_root(tracker, current_work, current_work2, get_current_txouts, datadir_path, net, get_stale_counts, my_pubkey_hash, local_rate_monitor, args.worker_fee, p2p_node, my_share_hashes, pseudoshare_received, share_received)
- worker_interface.WorkerInterface(WorkerBridge()).attach_to(web_root, get_handler=lambda request: request.redirect('/static/'))
-
- deferral.retry('Error binding to worker port:', traceback=False)(reactor.listenTCP)(worker_endpoint[1], server.Site(web_root), interface=worker_endpoint[0])
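+ # serve stratum and HTTP getwork on a single port: connections whose first byte is '{' go to the stratum factory, everything else falls through to the web server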
+ serverfactory = switchprotocol.FirstByteSwitchFactory({'{': stratum.StratumServerFactory(caching_wb)}, web_serverfactory)
+ deferral.retry('Error binding to worker port:', traceback=False)(reactor.listenTCP)(worker_endpoint[1], serverfactory, interface=worker_endpoint[0])
- with open(os.path.join(os.path.join(datadir_path, 'ready_flag')), 'wb') as f:
+ with open(os.path.join(datadir_path, 'ready_flag'), 'wb') as f:
pass
print
- @defer.inlineCallbacks
- def work_poller():
- while True:
- flag = factory.new_block.get_deferred()
- try:
- yield set_real_work1()
- except:
- log.err()
- yield defer.DeferredList([flag, deferral.sleep(15)], fireOnOneCallback=True)
- work_poller()
-
-
# done!
print 'Started successfully!'
print 'Go to http://127.0.0.1:%i/ to view graphs and statistics!' % (worker_endpoint[1],)
- if args.donation_percentage > 0.51:
+ if args.donation_percentage > 1.1:
print '''Donating %.1f%% of work towards P2Pool's development. Thanks for the tip!''' % (args.donation_percentage,)
- elif args.donation_percentage < 0.49:
+ elif args.donation_percentage < .9:
print '''Donating %.1f%% of work towards P2Pool's development. Please donate to encourage further development of P2Pool!''' % (args.donation_percentage,)
else:
print '''Donating %.1f%% of work towards P2Pool's development. Thank you!''' % (args.donation_percentage,)
sys.stderr.write, 'Watchdog timer went off at:\n' + ''.join(traceback.format_stack())
))
signal.siginterrupt(signal.SIGALRM, False)
- task.LoopingCall(signal.alarm, 30).start(1)
+ deferral.RobustLoopingCall(signal.alarm, 30).start(1)
if args.irc_announce:
from twisted.words.protocols import irc
print repr(line)
irc.IRCClient.lineReceived(self, line)
def signedOn(self):
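+ # track channel membership so block announcements are only sent after the join completes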
+ self.in_channel = False
irc.IRCClient.signedOn(self)
self.factory.resetDelay()
self.join(self.channel)
@defer.inlineCallbacks
def new_share(share):
+ if not self.in_channel:
+ return
if share.pow_hash <= share.header['bits'].target and abs(share.timestamp - time.time()) < 10*60:
yield deferral.sleep(random.expovariate(1/60))
message = '\x02%s BLOCK FOUND by %s! %s%064x' % (net.NAME.upper(), bitcoin_data.script2_to_address(share.new_script, net.PARENT), net.PARENT.BLOCK_EXPLORER_URL_PREFIX, share.header_hash)
- if message not in self.recent_messages:
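+ # dedupe announcements by block hash rather than by exact message text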
+ if all('%x' % (share.header_hash,) not in old_message for old_message in self.recent_messages):
self.say(self.channel, message)
self._remember_message(message)
- self.watch_id = tracker.verified.added.watch(new_share)
+ self.watch_id = node.tracker.verified.added.watch(new_share)
self.recent_messages = []
+ def joined(self, channel):
+ self.in_channel = True
+ def left(self, channel):
+ self.in_channel = False
def _remember_message(self, message):
self.recent_messages.append(message)
while len(self.recent_messages) > 100:
if channel == self.channel:
self._remember_message(message)
def connectionLost(self, reason):
- tracker.verified.added.unwatch(self.watch_id)
+ node.tracker.verified.added.unwatch(self.watch_id)
print 'IRC connection lost:', reason.getErrorMessage()
class IRCClientFactory(protocol.ReconnectingClientFactory):
protocol = IRCClient
- reactor.connectTCP("irc.freenode.net", 6667, IRCClientFactory())
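+ # originate the IRC connection from the worker interface's local address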
+ reactor.connectTCP("irc.freenode.net", 6667, IRCClientFactory(), bindAddress=(worker_endpoint[0], 0))
@defer.inlineCallbacks
def status_thread():
while True:
yield deferral.sleep(3)
try:
- if time.time() > current_work2.value['last_update'] + 60:
- print >>sys.stderr, '''---> LOST CONTACT WITH BITCOIND for %s! Check that it isn't frozen or dead! <---''' % (math.format_dt(time.time() - current_work2.value['last_update']),)
-
- height = tracker.get_height(current_work.value['best_share_hash'])
+ height = node.tracker.get_height(node.best_share_var.value)
this_str = 'P2Pool: %i shares in chain (%i verified/%i total) Peers: %i (%i incoming)' % (
height,
- len(tracker.verified.shares),
- len(tracker.shares),
- len(p2p_node.peers),
- sum(1 for peer in p2p_node.peers.itervalues() if peer.incoming),
+ len(node.tracker.verified.items),
+ len(node.tracker.items),
+ len(node.p2p_node.peers),
+ sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
) + (' FDs: %i R/%i W' % (len(reactor.getReaders()), len(reactor.getWriters())) if p2pool.DEBUG else '')
- datums, dt = local_rate_monitor.get_datums_in_last()
+ datums, dt = wb.local_rate_monitor.get_datums_in_last()
my_att_s = sum(datum['work']/dt for datum in datums)
+ my_shares_per_s = sum(datum['work']/dt/bitcoin_data.target_to_average_attempts(datum['share_target']) for datum in datums)
this_str += '\n Local: %sH/s in last %s Local dead on arrival: %s Expected time to share: %s' % (
math.format(int(my_att_s)),
math.format_dt(dt),
math.format_binomial_conf(sum(1 for datum in datums if datum['dead']), len(datums), 0.95),
- math.format_dt(2**256 / tracker.shares[current_work.value['best_share_hash']].max_target / my_att_s) if my_att_s and current_work.value['best_share_hash'] else '???',
+ math.format_dt(1/my_shares_per_s) if my_shares_per_s else '???',
)
if height > 2:
- (stale_orphan_shares, stale_doa_shares), shares, _ = get_stale_counts()
- stale_prop = p2pool_data.get_average_stale_prop(tracker, current_work.value['best_share_hash'], min(60*60//net.SHARE_PERIOD, height))
- real_att_s = p2pool_data.get_pool_attempts_per_second(tracker, current_work.value['best_share_hash'], min(height - 1, 60*60//net.SHARE_PERIOD)) / (1 - stale_prop)
+ (stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
+ stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, min(60*60//net.SHARE_PERIOD, height))
+ real_att_s = p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, min(height - 1, 60*60//net.SHARE_PERIOD)) / (1 - stale_prop)
this_str += '\n Shares: %i (%i orphan, %i dead) Stale rate: %s Efficiency: %s Current payout: %.4f %s' % (
shares, stale_orphan_shares, stale_doa_shares,
math.format_binomial_conf(stale_orphan_shares + stale_doa_shares, shares, 0.95),
math.format_binomial_conf(stale_orphan_shares + stale_doa_shares, shares, 0.95, lambda x: (1 - x)/(1 - stale_prop)),
- get_current_txouts().get(bitcoin_data.pubkey_hash_to_script2(my_pubkey_hash), 0)*1e-8, net.PARENT.SYMBOL,
+ node.get_current_txouts().get(bitcoin_data.pubkey_hash_to_script2(my_pubkey_hash), 0)*1e-6, net.PARENT.SYMBOL,
)
this_str += '\n Pool: %sH/s Stale rate: %.1f%% Expected time to block: %s' % (
math.format(int(real_att_s)),
100*stale_prop,
- math.format_dt(2**256 / current_work.value['bits'].target / real_att_s),
+ math.format_dt(2**256 / node.bitcoind_work.value['bits'].target / real_att_s),
)
- for warning in p2pool_data.get_warnings(tracker, current_work, net):
+ for warning in p2pool_data.get_warnings(node.tracker, node.best_share_var.value, net, bitcoind_getinfo_var.value, node.bitcoind_work.value):
print >>sys.stderr, '#'*40
print >>sys.stderr, '>>> Warning: ' + warning
print >>sys.stderr, '#'*40
+
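+ # gc.garbage holds objects the collector found but couldn't free; report them to help catch memory leaks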
+ if gc.garbage:
+ print '%i pieces of uncollectable cyclic garbage! Types: %r' % (len(gc.garbage), map(type, gc.garbage))
if this_str != last_str or time.time() > last_time + 15:
print this_str
log.err(None, 'Fatal error:')
def run():
- class FixedArgumentParser(argparse.ArgumentParser):
- def _read_args_from_files(self, arg_strings):
- # expand arguments referencing files
- new_arg_strings = []
- for arg_string in arg_strings:
-
- # for regular arguments, just add them back into the list
- if not arg_string or arg_string[0] not in self.fromfile_prefix_chars:
- new_arg_strings.append(arg_string)
-
- # replace arguments referencing files with the file content
- else:
- try:
- args_file = open(arg_string[1:])
- try:
- arg_strings = []
- for arg_line in args_file.read().splitlines():
- for arg in self.convert_arg_line_to_args(arg_line):
- arg_strings.append(arg)
- arg_strings = self._read_args_from_files(arg_strings)
- new_arg_strings.extend(arg_strings)
- finally:
- args_file.close()
- except IOError:
- err = sys.exc_info()[1]
- self.error(str(err))
-
- # return the modified argument list
- return new_arg_strings
-
- def convert_arg_line_to_args(self, arg_line):
- return [arg for arg in arg_line.split() if arg.strip()]
-
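+ # abortConnection() (Twisted 11.1+) is needed to forcibly drop stalled connections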
+ if not hasattr(tcp.Client, 'abortConnection'):
+ print "Twisted doesn't have abortConnection! Upgrade to a newer version of Twisted to avoid memory leaks!"
+ print 'Pausing for 3 seconds...'
+ time.sleep(3)
- realnets=dict((name, net) for name, net in networks.nets.iteritems() if '_testnet' not in name)
+ realnets = dict((name, net) for name, net in networks.nets.iteritems() if '_testnet' not in name)
- parser = FixedArgumentParser(description='p2pool (version %s)' % (p2pool.__version__,), fromfile_prefix_chars='@')
+ parser = fixargparse.FixedArgumentParser(description='p2pool (version %s)' % (p2pool.__version__,), fromfile_prefix_chars='@')
parser.add_argument('--version', action='version', version=p2pool.__version__)
parser.add_argument('--net',
- help='use specified network (default: bitcoin)',
- action='store', choices=sorted(realnets), default='bitcoin', dest='net_name')
+ help='use specified network (default: novacoin)',
+ action='store', choices=sorted(realnets), default='novacoin', dest='net_name')
parser.add_argument('--testnet',
help='''use the network's testnet''',
action='store_const', const=True, default=False, dest='testnet')
help='call getauxblock on this url to get work for merged mining (example: http://ncuser:ncpass@127.0.0.1:10332/)',
type=str, action='append', default=[], dest='merged_urls')
parser.add_argument('--give-author', metavar='DONATION_PERCENTAGE',
- help='donate this percentage of work towards the development of p2pool (default: 0.5)',
- type=float, action='store', default=0.5, dest='donation_percentage')
+ help='donate this percentage of work towards the development of p2pool (default: 1.0)',
+ type=float, action='store', default=1.0, dest='donation_percentage')
parser.add_argument('--iocp',
help='use Windows IOCP API in order to avoid errors due to large number of sockets being open',
action='store_true', default=False, dest='iocp')
p2pool_group.add_argument('--max-conns', metavar='CONNS',
help='maximum incoming connections (default: 40)',
type=int, action='store', default=40, dest='p2pool_conns')
+ p2pool_group.add_argument('--outgoing-conns', metavar='CONNS',
+ help='outgoing connections (default: 6)',
+ type=int, action='store', default=6, dest='p2pool_outgoing_conns')
+ parser.add_argument('--disable-advertise',
+ help='''don't advertise local IP address as being available for incoming connections. useful for running a dark node, along with multiple '-n ADDR' arguments and --outgoing-conns 0''',
+ action='store_false', default=True, dest='advertise_ip')
worker_group = parser.add_argument_group('worker interface')
worker_group.add_argument('-w', '--worker-port', metavar='PORT or ADDR:PORT',
bitcoind_group.add_argument('--bitcoind-rpc-port', metavar='BITCOIND_RPC_PORT',
help='''connect to JSON-RPC interface at this port (default: %s <read from bitcoin.conf if password not provided>)''' % ', '.join('%s:%i' % (name, net.PARENT.RPC_PORT) for name, net in sorted(realnets.items())),
type=int, action='store', default=None, dest='bitcoind_rpc_port')
+ bitcoind_group.add_argument('--bitcoind-rpc-ssl',
+ help='connect to JSON-RPC interface using SSL',
+ action='store_true', default=False, dest='bitcoind_rpc_ssl')
bitcoind_group.add_argument('--bitcoind-p2p-port', metavar='BITCOIND_P2P_PORT',
help='''connect to P2P interface at this port (default: %s <read from bitcoin.conf if password not provided>)''' % ', '.join('%s:%i' % (name, net.PARENT.P2P_PORT) for name, net in sorted(realnets.items())),
type=int, action='store', default=None, dest='bitcoind_p2p_port')
if args.debug:
p2pool.DEBUG = True
+ defer.setDebugging(True)
+ else:
+ p2pool.DEBUG = False
net_name = args.net_name + ('_testnet' if args.testnet else '')
net = networks.nets[net_name]
args.bitcoind_rpc_username, args.bitcoind_rpc_password = ([None, None] + args.bitcoind_rpc_userpass)[-2:]
if args.bitcoind_rpc_password is None:
- if not hasattr(net.PARENT, 'CONF_FILE_FUNC'):
- parser.error('This network has no configuration file function. Manually enter your RPC password.')
conf_path = net.PARENT.CONF_FILE_FUNC()
if not os.path.exists(conf_path):
parser.error('''Bitcoin configuration file not found. Manually enter your RPC password.\r\n'''
'''If you actually haven't created a configuration file, you should create one at %s with the text:\r\n'''
'''\r\n'''
'''server=1\r\n'''
- '''rpcpassword=%x''' % (conf_path, random.randrange(2**128)))
- with open(conf_path, 'rb') as f:
- cp = ConfigParser.RawConfigParser()
- cp.readfp(StringIO.StringIO('[x]\r\n' + f.read()))
- for conf_name, var_name, var_type in [
- ('rpcuser', 'bitcoind_rpc_username', str),
- ('rpcpassword', 'bitcoind_rpc_password', str),
- ('rpcport', 'bitcoind_rpc_port', int),
- ('port', 'bitcoind_p2p_port', int),
- ]:
- if getattr(args, var_name) is None and cp.has_option('x', conf_name):
- setattr(args, var_name, var_type(cp.get('x', conf_name)))
+ '''rpcpassword=%x\r\n'''
+ '''\r\n'''
+ '''Keep that password secret! After creating the file, restart Bitcoin.''' % (conf_path, random.randrange(2**128)))
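+ # parse bitcoin.conf by hand ('key=value' lines, '#' starts a comment) instead of using ConfigParser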
+ conf = open(conf_path, 'rb').read()
+ contents = {}
+ for line in conf.splitlines(True):
+ if '#' in line:
+ line = line[:line.index('#')]
+ if '=' not in line:
+ continue
+ k, v = line.split('=', 1)
+ contents[k.strip()] = v.strip()
+ for conf_name, var_name, var_type in [
+ ('rpcuser', 'bitcoind_rpc_username', str),
+ ('rpcpassword', 'bitcoind_rpc_password', str),
+ ('rpcport', 'bitcoind_rpc_port', int),
+ ('port', 'bitcoind_p2p_port', int),
+ ]:
+ if getattr(args, var_name) is None and conf_name in contents:
+ setattr(args, var_name, var_type(contents[conf_name]))
if args.bitcoind_rpc_password is None:
parser.error('''Bitcoin configuration file didn't contain an rpcpassword= line! Add one!''')
if args.p2pool_port is None:
args.p2pool_port = net.P2P_PORT
+ if args.p2pool_outgoing_conns > 10:
+ parser.error('''--outgoing-conns can't be more than 10''')
+
if args.worker_endpoint is None:
worker_endpoint = '', net.WORKER_PORT
elif ':' not in args.worker_endpoint:
logfile.reopen()
print '...and reopened %r after catching SIGUSR1.' % (args.logfile,)
signal.signal(signal.SIGUSR1, sigusr1)
- task.LoopingCall(logfile.reopen).start(5)
+ deferral.RobustLoopingCall(logfile.reopen).start(5)
class ErrorReporter(object):
def __init__(self):