From 5cc4f82ecf4729c29cb62adc9977d0945dd7e510 Mon Sep 17 00:00:00 2001
From: Forrest Voight
Date: Sat, 11 Feb 2012 19:20:20 -0500
Subject: [PATCH] added RateMonitor class to clean up rate queue handling in
 main.py

---
 p2pool/main.py      | 36 ++++++++++++++++--------------------
 p2pool/util/math.py | 30 ++++++++++++++++++++++++++++++
 2 files changed, 46 insertions(+), 20 deletions(-)

diff --git a/p2pool/main.py b/p2pool/main.py
index 12dfea3..1dc5fd1 100644
--- a/p2pool/main.py
+++ b/p2pool/main.py
@@ -450,7 +450,7 @@ def main(args, net, datadir_path, merged_urls, worker_endpoint):
         
         return (my_shares_not_in_chain - my_doa_shares_not_in_chain, my_doa_shares_not_in_chain), my_shares, (orphans_recorded_in_chain, doas_recorded_in_chain)
     
-    recent_shares_ts_work2 = []
+    local_rate_monitor = math.RateMonitor(10*60)
     
     class WorkerBridge(worker_interface.WorkerBridge):
         def __init__(self):
@@ -629,7 +629,7 @@ def main(args, net, datadir_path, merged_urls, worker_endpoint):
             self.recent_shares_ts_work.append((time.time(), bitcoin_data.target_to_average_attempts(target)))
             while len(self.recent_shares_ts_work) > 50:
                 self.recent_shares_ts_work.pop(0)
-            recent_shares_ts_work2.append((time.time(), bitcoin_data.target_to_average_attempts(target), not on_time, request.getUser()))
+            local_rate_monitor.add_datum(dict(work=bitcoin_data.target_to_average_attempts(target), dead=not on_time, user=request.getUser()))
             
             
             if pow_hash > target:
@@ -739,10 +739,11 @@ def main(args, net, datadir_path, merged_urls, worker_endpoint):
             
             miner_hash_rates = {}
             miner_dead_hash_rates = {}
-            for ts, work, dead, user in recent_shares_ts_work2:
-                miner_hash_rates[user] = miner_hash_rates.get(user, 0) + work/600
-                if dead:
-                    miner_dead_hash_rates[user] = miner_hash_rates.get(user, 0) + work/600
+            datums, dt = local_rate_monitor.get_datums_in_last()
+            for datum in datums:
+                miner_hash_rates[datum['user']] = miner_hash_rates.get(datum['user'], 0) + datum['work']/dt
+                if datum['dead']:
+                    miner_dead_hash_rates[datum['user']] = miner_dead_hash_rates.get(datum['user'], 0) + datum['work']/dt
             
             return json.dumps(dict(
                 my_hash_rates_in_last_hour=dict(
@@ -819,10 +820,11 @@ def main(args, net, datadir_path, merged_urls, worker_endpoint):
             
             miner_hash_rates = {}
             miner_dead_hash_rates = {}
-            for ts, work, dead, user in recent_shares_ts_work2:
-                miner_hash_rates[user] = miner_hash_rates.get(user, 0) + work/600
-                if dead:
-                    miner_dead_hash_rates[user] = miner_hash_rates.get(user, 0) + work/600
+            datums, dt = local_rate_monitor.get_datums_in_last()
+            for datum in datums:
+                miner_hash_rates[datum['user']] = miner_hash_rates.get(datum['user'], 0) + datum['work']/dt
+                if datum['dead']:
+                    miner_dead_hash_rates[datum['user']] = miner_dead_hash_rates.get(datum['user'], 0) + datum['work']/dt
             
             stat_log.append(dict(
                 time=time.time(),
@@ -915,9 +917,6 @@ def main(args, net, datadir_path, merged_urls, worker_endpoint):
     
     @defer.inlineCallbacks
     def status_thread():
-        average_period = 600
-        first_pseudoshare_time = None
-        
         last_str = None
         last_time = 0
         while True:
@@ -935,15 +934,12 @@ def main(args, net, datadir_path, merged_urls, worker_endpoint):
                     sum(1 for peer in p2p_node.peers.itervalues() if peer.incoming),
                 ) + (' FDs: %i R/%i W' % (len(reactor.getReaders()), len(reactor.getWriters())) if p2pool.DEBUG else '')
                 
-                if first_pseudoshare_time is None and recent_shares_ts_work2:
-                    first_pseudoshare_time = recent_shares_ts_work2[0][0]
-                while recent_shares_ts_work2 and recent_shares_ts_work2[0][0] < time.time() - average_period:
-                    recent_shares_ts_work2.pop(0)
-                my_att_s = sum(work for ts, work, dead, user in recent_shares_ts_work2)/min(time.time() - first_pseudoshare_time, average_period) if first_pseudoshare_time is not None else 0
+                datums, dt = local_rate_monitor.get_datums_in_last()
+                my_att_s = sum(datum['work']/dt for datum in datums)
                 this_str += '\n Local: %sH/s in last %s Local dead on arrival: %s Expected time to share: %s' % (
                     math.format(int(my_att_s)),
-                    math.format_dt(min(time.time() - first_pseudoshare_time, average_period) if first_pseudoshare_time is not None else 0),
-                    math.format_binomial_conf(sum(1 for tx, work, dead, user in recent_shares_ts_work2 if dead), len(recent_shares_ts_work2), 0.95),
+                    math.format_dt(dt),
+                    math.format_binomial_conf(sum(1 for datum in datums if datum['dead']), len(datums), 0.95),
                     math.format_dt(2**256 / tracker.shares[current_work.value['best_share_hash']].target / my_att_s) if my_att_s else '???',
                 )
                 
diff --git a/p2pool/util/math.py b/p2pool/util/math.py
index 8e73e3c..4482f33 100644
--- a/p2pool/util/math.py
+++ b/p2pool/util/math.py
@@ -3,6 +3,7 @@ from __future__ import absolute_import, division
 import __builtin__
 import math
 import random
+import time
 
 def median(x, use_float=True):
     # there exist better algorithms...
@@ -218,6 +219,35 @@ def string_to_natural(s, alphabet=None):
     assert not s.startswith(alphabet[0])
     return sum(alphabet.index(char) * len(alphabet)**i for i, char in enumerate(reversed(s)))
 
+class RateMonitor(object):
+    def __init__(self, max_lookback_time):
+        self.max_lookback_time = max_lookback_time
+        
+        self.datums = []
+        self.first_timestamp = None
+    
+    def _prune(self):
+        start_time = time.time() - self.max_lookback_time
+        for i, (ts, datum) in enumerate(self.datums):
+            if ts > start_time:
+                self.datums[:] = self.datums[i:]
+                return
+    
+    def get_datums_in_last(self, dt=None):
+        if dt is None:
+            dt = self.max_lookback_time
+        assert dt <= self.max_lookback_time
+        self._prune()
+        now = time.time()
+        return [datum for ts, datum in self.datums if ts > now - dt], min(dt, now - self.first_timestamp) if self.first_timestamp is not None else 0
+    
+    def add_datum(self, datum):
+        self._prune()
+        t = time.time()
+        self.datums.append((t, datum))
+        if self.first_timestamp is None:
+            self.first_timestamp = t
+
 if __name__ == '__main__':
     import random
     a = 1
-- 
1.7.1