1 from __future__ import absolute_import
2 from __future__ import division
8 from twisted.python import log
11 def _atomic_write(filename, data):
12 open(filename + '.new', 'w').write(data)
13 os.rename(filename + '.new', filename)
class DataViewDescription(object):
    """Static geometry of a single data view.

    A view keeps `bin_count` bins, each spanning `bin_width` time units,
    together covering `total_width` of history.
    """
    def __init__(self, bin_count, total_width):
        # True division (__future__.division is in effect), so fractional
        # bin widths survive integer arguments.
        self.bin_width = total_width / bin_count
        self.bin_count = bin_count
class DataView(object):
    # A fixed-size window of time bins for one data stream.  Each bin is a
    # (total, count) pair covering one desc.bin_width-wide interval; bins[0]
    # is the newest interval, ending at last_bin_end.
    # NOTE(review): several lines of this class are elided from this view
    # (e.g. the assignments of self.desc and self.bins in __init__, the suite
    # of the "if bin >= ..." guard, and the "def get_data(self):" header
    # before the final return) — do not treat the text below as the complete
    # definition; confirm against the full source.
    def __init__(self, desc, ds_desc, last_bin_end, bins):
        # The supplied bins must already match the view's configured size.
        assert len(bins) == desc.bin_count
        self.ds_desc = ds_desc
        self.last_bin_end = last_bin_end
    def _add_datum(self, t, value):
        # How many whole bins the window must advance so that t is no later
        # than the end of the newest bin (never negative for past datums).
        shift = max(0, int(math.ceil((t - self.last_bin_end)/self.desc.bin_width)))
        # Prepend `shift` empty bins and drop the same number from the old
        # end, capping the prepended run at bin_count so the list never grows.
        self.bins = [(0, 0)]*min(shift, self.desc.bin_count) + self.bins[:max(0, len(self.bins) - shift)]
        self.last_bin_end += shift*self.desc.bin_width
        # Index of the bin containing t; index 0 is the newest bin
        # (covering (last_bin_end - bin_width, last_bin_end]).
        # NOTE(review): "bin" shadows the builtin of the same name.
        bin = int(math.ceil((self.last_bin_end - self.desc.bin_width - t)/self.desc.bin_width))
        if bin >= self.desc.bin_count:
            # (suite elided in this view — presumably discards datums older
            # than the retained window; verify against the full source)
        # Fold the datum into its bin's running (total, count) accumulator.
        prev_total, prev_count = self.bins[bin]
        self.bins[bin] = prev_total + value, prev_count + 1
    # Body of get_data(self) — its "def" line is elided from this view.
    # Yields (bin_center_time, value) pairs, newest first: per-bin averages
    # (or None for empty bins) when the source is cumulative, otherwise
    # totals scaled by bin_width into a per-time-unit rate.
        return [(self.last_bin_end - self.desc.bin_width*(i + 1/2), (total/count if count else None) if self.ds_desc.source_is_cumulative else total/self.desc.bin_width) for i, (total, count) in enumerate(self.bins)]
class DataStreamDescription(object):
    """Static description of one data stream.

    Holds whether the stream's source reports cumulative values (averaged
    per bin on read-out) and the mapping of view name to
    DataViewDescription for every resolution this stream is tracked at.
    """
    def __init__(self, source_is_cumulative, dataview_descriptions):
        self.dataview_descriptions = dataview_descriptions
        self.source_is_cumulative = source_is_cumulative
class DataStream(object):
    # One named stream of incoming datums together with its views at the
    # various configured resolutions.
    # NOTE(review): at least one __init__ line is elided from this view
    # (presumably "self.desc = desc"); nothing visible in this chunk reads
    # ds.desc (HistoryDatabase.write reads dv.desc), but confirm against the
    # full source before relying on that.
    def __init__(self, desc, dataviews):
        self.dataviews = dataviews
    def add_datum(self, t, value=1):
        # Fan the datum out to every view; dv_name itself is unused here.
        for dv_name, dv in self.dataviews.iteritems():
            dv._add_datum(t, value)
class HistoryDatabase(object):
    # Top-level container mapping stream names to DataStream objects, with
    # JSON persistence via _atomic_write.
    # NOTE(review): many structural lines of this class are elided from this
    # view (the @classmethod decorators, the "return cls(dict(" wrappers and
    # their closing parentheses, the "try:" before json.loads, the except
    # handler's body, and the closers of write's json.dumps call).  The
    # fragments below are kept verbatim; confirm the full source before
    # editing.
    def from_nothing(cls, datastream_descriptions):
        # Build a fresh database: every view starts with all-zero
        # (total, count) bins and last_bin_end at 0.
        (ds_name, DataStream(ds_desc, dict(
            (dv_name, DataView(dv_desc, ds_desc, 0, dv_desc.bin_count*[(0, 0)]))
            for dv_name, dv_desc in ds_desc.dataview_descriptions.iteritems()
        for ds_name, ds_desc in datastream_descriptions.iteritems()
    def from_file(cls, datastream_descriptions, filename):
        # Load previously persisted state; a read/parse failure reaches the
        # (elided) except handler — presumably falling back to an empty
        # database, but that body is not visible here.
        data = json.loads(open(filename, 'rb').read())
        except Exception: # XXX
        def get_dataview(ds_name, ds_desc, dv_name, dv_desc):
            # Reuse stored bins only when the stored geometry still matches
            # the requested description (same bin width and bin count);
            # otherwise restart that view empty.
            ds_data = data[ds_name]
            if dv_name in ds_data:
                dv_data = ds_data[dv_name]
                if dv_data['bin_width'] == dv_desc.bin_width and len(dv_data['bins']) == dv_desc.bin_count:
                    return DataView(dv_desc, ds_desc, dv_data['last_bin_end'], dv_data['bins'])
            return DataView(dv_desc, ds_desc, 0, dv_desc.bin_count*[(0, 0)])
        (ds_name, DataStream(ds_desc, dict(
            (dv_name, get_dataview(ds_name, ds_desc, dv_name, dv_desc))
            for dv_name, dv_desc in ds_desc.dataview_descriptions.iteritems()
        for ds_name, ds_desc in datastream_descriptions.iteritems()
    def __init__(self, datastreams):
        # datastreams: dict mapping stream name -> DataStream.
        self.datastreams = datastreams
    def write(self, filename):
        # Serialize every view's (last_bin_end, bin_width, bins) to JSON and
        # persist with the write-then-rename helper so readers never see a
        # half-written file.
        _atomic_write(filename, json.dumps(
            dict((ds_name, dict((dv_name, dict(last_bin_end=dv.last_bin_end, bin_width=dv.desc.bin_width, bins=dv.bins))
                for dv_name, dv in ds.dataviews.iteritems())) for ds_name, ds in self.datastreams.iteritems())