-import struct
-import StringIO
-import hashlib
-
-class EarlyEnd(Exception):
- pass
+from __future__ import division
-class LateEnd(Exception):
- pass
+import hashlib
+import random
+import warnings
-class Type(object):
- # the same data can have only one unpacked representation, but multiple packed binary representations
-
- def _unpack(self, data):
- f = StringIO.StringIO(data)
-
- obj = self.read(f)
-
- if f.tell() != len(data):
- raise LateEnd('underread ' + repr((self, data)))
-
- return obj
-
- def unpack(self, data):
- obj = self._unpack(data)
- assert self._unpack(self._pack(obj)) == obj
- return obj
-
- def _pack(self, obj):
- f = StringIO.StringIO()
-
- self.write(f, obj)
-
- data = f.getvalue()
-
- return data
-
- def pack(self, obj):
- data = self._pack(obj)
- assert self._unpack(data) == obj
- return data
+import p2pool
+from p2pool.util import math, pack
def hash256(data):
    """Double-SHA256 of data, decoded as a little-endian 256-bit integer."""
    inner = hashlib.sha256(data).digest()
    return pack.IntType(256).unpack(hashlib.sha256(inner).digest())
def hash160(data):
    """RIPEMD160(SHA256(data)), decoded as a little-endian 160-bit integer."""
    inner = hashlib.sha256(data).digest()
    return pack.IntType(160).unpack(hashlib.new('ripemd160', inner).digest())
def scrypt(data):
    """Litecoin-style scrypt proof-of-work hash of data, as a 256-bit integer.

    The native ltc_scrypt extension is imported lazily (only when this is
    actually called), so the module still loads on nodes that don't have it.
    An explicit import statement replaces the original __import__ hack for
    readability; behavior is identical, including the ImportError raised at
    call time when the extension is missing.
    """
    import ltc_scrypt  # deferred: optional native extension
    return pack.IntType(256).unpack(ltc_scrypt.getPoWHash(data))
class ChecksummedType(pack.Type):
    """Wraps an inner type, appending a checksum of the packed payload on
    write and verifying it on read.

    The default checksum is the Base58Check convention: the first 4 bytes
    of double-SHA256 of the serialized payload.
    """

    def __init__(self, inner, checksum_func=lambda data: hashlib.sha256(hashlib.sha256(data).digest()).digest()[:4]):
        self.inner = inner
        self.checksum_func = checksum_func

    def read(self, file):
        obj, file = self.inner.read(file)
        # re-serialize to obtain the exact bytes the checksum covers
        expected = self.checksum_func(self.inner.pack(obj))
        stored, file = pack.read(file, len(expected))
        if stored != expected:
            raise ValueError('invalid checksum')
        return obj, file

    def write(self, file, item):
        payload = self.inner.pack(item)
        # chained (file, data) pairs, following pack.Type's write protocol
        return (file, payload), self.checksum_func(payload)
+
class FloatingInteger(object):
    """Bitcoin's compact 32-bit 'bits' difficulty encoding, with the full
    256-bit target computed lazily and cached on first access."""

    __slots__ = ['bits', '_target']

    @classmethod
    def from_target_upper_bound(cls, target):
        # encode target as base-256 digits; prepend a zero byte when the
        # high bit is set, since the compact form treats it as a sign bit
        digits = math.natural_to_string(target)
        if digits and ord(digits[0]) >= 128:
            digits = '\x00' + digits
        # length byte followed by the first three mantissa bytes, reversed
        # into the little-endian word layout
        compact = (chr(len(digits)) + (digits + 3*chr(0))[:3])[::-1]
        return cls(pack.IntType(32).unpack(compact))

    def __init__(self, bits, target=None):
        self.bits = bits
        self._target = None
        if target is not None and self.target != target:
            raise ValueError('target does not match')

    @property
    def target(self):
        cached = self._target
        if cached is None:
            # mantissa is the low 24 bits, exponent the high byte (biased by 3)
            cached = self._target = math.shift_left(self.bits & 0x00ffffff, 8 * ((self.bits >> 24) - 3))
        return cached

    def __hash__(self):
        return hash(self.bits)

    def __eq__(self, other):
        return self.bits == other.bits

    def __ne__(self, other):
        return not (self == other)

    def __cmp__(self, other):
        # ordering is deliberately unsupported; compare .target explicitly
        assert False

    def __repr__(self):
        return 'FloatingInteger(bits=%s, target=%s)' % (hex(self.bits), hex(self.target))
class FloatingIntegerType(pack.Type):
    """Serializes a FloatingInteger as its raw 32-bit little-endian
    compact-bits word."""

    _inner = pack.IntType(32)

    def read(self, file):
        word, file = self._inner.read(file)
        return FloatingInteger(word), file

    def write(self, file, item):
        return self._inner.write(file, item.bits)
# network address record as used in the p2p protocol (no timestamp field)
address_type = pack.ComposedType([
    ('services', pack.IntType(64)),       # service-flags bitmask
    ('address', pack.IPV6AddressType()),  # IPv4-mapped IPv6 address
    ('port', pack.IntType(16, 'big')),    # NOTE: big-endian, unlike the rest
])
# transaction serialization; carries an extra per-transaction timestamp
# (not present in vanilla bitcoin transactions — presumably a PoS-style
# chain field; verify against the target network's format)
tx_type = pack.ComposedType([
    ('version', pack.IntType(32)),
    ('timestamp', pack.IntType(32)), # txn timestamp
    ('tx_ins', pack.ListType(pack.ComposedType([
        # None encodes the null outpoint used by coinbase inputs
        ('previous_output', pack.PossiblyNoneType(dict(hash=0, index=2**32 - 1), pack.ComposedType([
            ('hash', pack.IntType(256)),
            ('index', pack.IntType(32)),
        ]))),
        ('script', pack.VarStrType()),
        # None encodes the default sequence value 0xffffffff
        ('sequence', pack.PossiblyNoneType(2**32 - 1, pack.IntType(32))),
    ]))),
    ('tx_outs', pack.ListType(pack.ComposedType([
        ('value', pack.IntType(64)),   # amount in base units
        ('script', pack.VarStrType()), # output script (scriptPubKey)
    ]))),
    ('lock_time', pack.IntType(32)),
])
# a merkle branch plus the leaf's index within the tree
merkle_link_type = pack.ComposedType([
    ('branch', pack.ListType(pack.IntType(256))),
    ('index', pack.IntType(32)),
])
# a transaction together with the proof linking it into a block's merkle tree
merkle_tx_type = pack.ComposedType([
    ('tx', tx_type),
    ('block_hash', pack.IntType(256)),
    ('merkle_link', merkle_link_type),
])
# 80-byte block header (4+32+32+4+4+4); 'bits' is the compact difficulty word
block_header_type = pack.ComposedType([
    ('version', pack.IntType(32)),
    # None encodes the all-zero previous hash of a genesis block
    ('previous_block', pack.PossiblyNoneType(0, pack.IntType(256))),
    ('merkle_root', pack.IntType(256)),
    ('timestamp', pack.IntType(32)),
    ('bits', FloatingIntegerType()),
    ('nonce', pack.IntType(32)),
])
-def merkle_hash(tx_list):
- hash_list = [doublesha(tx_type.pack(tx)) for tx in tx_list]
- while len(hash_list) > 1:
- hash_list = [doublesha(merkle_record_type.pack(dict(left=left, right=left if right is None else right)))
- for left, right in zip(hash_list[::2], hash_list[1::2] + [None])]
- return hash_list[0]
-
-def tx_hash(tx):
- return doublesha(tx_type.pack(tx))
-
-def block_hash(header):
- return doublesha(block_header_type.pack(header))
-
-class EarlyEnd(Exception):
- pass
-
-class LateEnd(Exception):
- pass
-
-class Type(object):
- # the same data can have only one unpacked representation, but multiple packed binary representations
-
- def _unpack(self, data):
- f = StringIO.StringIO(data)
-
- obj = self.read(f)
-
- if f.tell() != len(data):
- raise LateEnd('underread ' + repr((self, data)))
-
- return obj
-
- def unpack(self, data):
- obj = self._unpack(data)
- assert self._unpack(self._pack(obj)) == obj
- return obj
-
- def _pack(self, obj):
- f = StringIO.StringIO()
-
- self.write(f, obj)
-
- data = f.getvalue()
-
- return data
-
- def pack(self, obj):
- data = self._pack(obj)
- assert self._unpack(data) == obj
- return data
-
-class VarIntType(Type):
- def read(self, file):
- data = file.read(1)
- if len(data) != 1:
- raise EarlyEnd()
- first, = struct.unpack('<B', data)
- if first == 0xff: desc = '<Q'
- elif first == 0xfe: desc = '<I'
- elif first == 0xfd: desc = '<H'
- else: return first
- length = struct.calcsize(desc)
- data = file.read(length)
- if len(data) != length:
- raise EarlyEnd()
- return struct.unpack(desc, data)[0]
-
- def write(self, file, item):
- if item < 0xfd:
- file.write(struct.pack('<B', item))
- elif item <= 0xffff:
- file.write(struct.pack('<BH', 0xfd, item))
- elif item <= 0xffffffff:
- file.write(struct.pack('<BI', 0xfe, item))
- elif item <= 0xffffffffffffffff:
- file.write(struct.pack('<BQ', 0xff, item))
- else:
- raise ValueError('int too large for varint')
-
-class VarStrType(Type):
- def read(self, file):
- length = VarIntType().read(file)
- res = file.read(length)
- if len(res) != length:
- raise EarlyEnd('var str not long enough %r' % ((length, len(res), res),))
- return res
-
- def write(self, file, item):
- VarIntType().write(file, len(item))
- file.write(item)
-
-class FixedStrType(Type):
- def __init__(self, length):
- self.length = length
-
- def read(self, file):
- res = file.read(self.length)
- if len(res) != self.length:
- raise EarlyEnd('early EOF!')
- return res
-
- def write(self, file, item):
- if len(item) != self.length:
- raise ValueError('incorrect length!')
- file.write(item)
-
-class EnumType(Type):
- def __init__(self, inner, values):
- self.inner = inner
- self.values = values
-
- self.keys = {}
- for k, v in values.iteritems():
- if v in self.keys:
- raise ValueError('duplicate value in values')
- self.keys[v] = k
-
- def read(self, file):
- return self.keys[self.inner.read(file)]
-
- def write(self, file, item):
- self.inner.write(file, self.values[item])
-
-class HashType(Type):
- def read(self, file):
- data = file.read(256//8)
- if len(data) != 256//8:
- raise EarlyEnd('incorrect length!')
- return int(data[::-1].encode('hex'), 16)
-
- def write(self, file, item):
- file.write(('%064x' % (item,)).decode('hex')[::-1])
-
-class ShortHashType(Type):
- def read(self, file):
- data = file.read(160//8)
- if len(data) != 160//8:
- raise EarlyEnd('incorrect length!')
- return int(data[::-1].encode('hex'), 16)
-
- def write(self, file, item):
- file.write(('%020x' % (item,)).decode('hex')[::-1])
-
-class ListType(Type):
- def __init__(self, type):
- self.type = type
-
- def read(self, file):
- length = VarIntType().read(file)
- return [self.type.read(file) for i in xrange(length)]
-
- def write(self, file, item):
- VarIntType().write(file, len(item))
- for subitem in item:
- self.type.write(file, subitem)
-
-class StructType(Type):
- def __init__(self, desc):
- self.desc = desc
- self.length = struct.calcsize(self.desc)
-
- def read(self, file):
- data = file.read(self.length)
- if len(data) != self.length:
- raise EarlyEnd()
- res, = struct.unpack(self.desc, data)
- return res
-
- def write(self, file, item):
- data = struct.pack(self.desc, item)
- if struct.unpack(self.desc, data)[0] != item:
- # special test because struct doesn't error on some overflows
- raise ValueError("item didn't survive pack cycle (%r)" % (item,))
- file.write(data)
-
-class IPV6AddressType(Type):
- def read(self, file):
- data = file.read(16)
- if len(data) != 16:
- raise EarlyEnd()
- if data[:12] != '00000000000000000000ffff'.decode('hex'):
- raise ValueError("ipv6 addresses not supported yet")
- return '::ffff:' + '.'.join(str(ord(x)) for x in data[12:])
-
- def write(self, file, item):
- prefix = '::ffff:'
- if not item.startswith(prefix):
- raise ValueError("ipv6 addresses not supported yet")
- item = item[len(prefix):]
- bits = map(int, item.split('.'))
- if len(bits) != 4:
- raise ValueError("invalid address: %r" % (bits,))
- data = '00000000000000000000ffff'.decode('hex') + ''.join(chr(x) for x in bits)
- assert len(data) == 16, len(data)
- file.write(data)
-
-class ComposedType(Type):
- def __init__(self, fields):
- self.fields = fields
-
- def read(self, file):
- item = {}
- for key, type_ in self.fields:
- item[key] = type_.read(file)
- return item
-
- def write(self, file, item):
- for key, type_ in self.fields:
- type_.write(file, item[key])
-
-address_type = ComposedType([
- ('services', StructType('<Q')),
- ('address', IPV6AddressType()),
- ('port', StructType('>H')),
# a full block: header, transaction list, and a trailing signature string
block_type = pack.ComposedType([
    ('header', block_header_type),
    ('txs', pack.ListType(tx_type)),
    ('signature', pack.VarStrType()), # header signature field
])
-tx_type = ComposedType([
- ('version', StructType('<I')),
- ('tx_ins', ListType(ComposedType([
- ('previous_output', ComposedType([
- ('hash', HashType()),
- ('index', StructType('<I')),
- ])),
- ('script', VarStrType()),
- ('sequence', StructType('<I')),
- ]))),
- ('tx_outs', ListType(ComposedType([
- ('value', StructType('<Q')),
- ('script', VarStrType()),
- ]))),
- ('lock_time', StructType('<I')),
-])
+# merged mining
-block_header_type = ComposedType([
- ('version', StructType('<I')),
- ('previous_block', HashType()),
- ('merkle_root', HashType()),
- ('timestamp', StructType('<I')),
- ('bits', StructType('<I')),
- ('nonce', StructType('<I')),
# merged-mining proof: the parent-chain coinbase transaction plus the
# merkle proofs tying it to this chain's work
aux_pow_type = pack.ComposedType([
    ('merkle_tx', merkle_tx_type),     # parent coinbase with its merkle link
    ('merkle_link', merkle_link_type), # link into the aux chain-id tree
    ('parent_block_header', block_header_type),
])
-block_type = ComposedType([
- ('header', block_header_type),
- ('txs', ListType(tx_type)),
# data embedded in a merged-mining parent coinbase script
aux_pow_coinbase_type = pack.ComposedType([
    ('merkle_root', pack.IntType(256, 'big')), # root of the aux-chain tree (big-endian)
    ('size', pack.IntType(32)),                # number of slots in the tree
    ('nonce', pack.IntType(32)),
])
-def doublesha(data):
- return HashType().unpack(hashlib.sha256(hashlib.sha256(data).digest()).digest())
def make_auxpow_tree(chain_ids):
    """Find the smallest power-of-two tree size that gives every chain id a
    distinct slot under the fixed merged-mining LCG slot function.

    Returns a (slot -> chain_id dict, size) pair.  Raises AssertionError
    (now with a message, instead of the original bare raise) if no size up
    to 2**30 is collision-free — practically unreachable.  xrange was
    replaced by range: identical behavior for this 31-element iteration.
    """
    for size in (2**i for i in range(31)):
        if size < len(chain_ids):
            continue
        slots = {}
        for chain_id in chain_ids:
            slot = (1103515245 * chain_id + 1103515245 * 12345 + 12345) % size
            if slot in slots:
                break  # collision - try the next larger size
            slots[slot] = chain_id
        else:
            return slots, size
    raise AssertionError('could not find a collision-free auxpow tree size')
-def ripemdsha(data):
- return ShortHashType().unpack(hashlib.new('ripemd160', hashlib.sha256(data).digest()).digest())
+# merkle trees
# one internal merkle-tree node: the two child hashes concatenated
merkle_record_type = pack.ComposedType([
    ('left', pack.IntType(256)),
    ('right', pack.IntType(256)),
])
def merkle_hash(hashes):
    """Compute the merkle root of an iterable of 256-bit integer hashes.

    Returns 0 for an empty input.  At each level an odd trailing element is
    paired with itself (standard bitcoin merkle construction).
    """
    if not hashes:
        return 0
    level = list(hashes)
    while len(level) > 1:
        if len(level) % 2:
            level.append(level[-1])  # duplicate the odd element out
        level = [hash256(merkle_record_type.pack(dict(left=l, right=r)))
                 for l, r in zip(level[::2], level[1::2])]
    return level[0]
-def tx_hash(tx):
- return doublesha(tx_type.pack(tx))
-
-def block_hash(header):
- return doublesha(block_header_type.pack(header))
-
-def bits_to_target(bits):
- return (bits & 0x00ffffff) * 2 ** (8 * ((bits >> 24) - 3))
def calculate_merkle_link(hashes, index):
    """Compute the merkle branch proving hashes[index] into merkle_hash(hashes).

    Hash evaluation is deferred through zero-argument thunks so that only
    the branch nodes actually appearing in the proof are computed.
    XXX optimize this
    """
    # per-node state: (thunk returning this node's hash,
    #                  does-this-subtree-contain-the-target flag,
    #                  branch entries accumulated so far)
    state = [(lambda _h=h: _h, i == index, []) for i, h in enumerate(hashes)]

    while len(state) > 1:
        state = [
            (
                lambda _left=left, _right=right: hash256(merkle_record_type.pack(dict(left=_left(), right=_right()))),
                left_flag or right_flag,
                (left_link if left_flag else right_link) + [dict(side=1, hash=right) if left_flag else dict(side=0, hash=left)],
            )
            for (left, left_flag, left_link), (right, right_flag, right_link) in
            zip(state[::2], state[1::2] + [state[::2][-1]])
        ]

    branch = [entry['hash']() for entry in state[0][2]]

    assert state[0][1]
    if p2pool.DEBUG:
        # cross-check the produced link against a full merkle computation
        new_hashes = [random.randrange(2**256) if x is None else x
                      for x in hashes]
        assert check_merkle_link(new_hashes[index], dict(branch=branch, index=index)) == merkle_hash(new_hashes)
        assert index == sum(k*2**i for i, k in enumerate([1-entry['side'] for entry in state[0][2]]))

    return dict(branch=branch, index=index)
+
def check_merkle_link(tip_hash, link):
    """Fold a merkle branch onto tip_hash and return the implied root.

    Each bit of link['index'] selects which side the branch hash sits on.
    Raises ValueError if the index is too large for the branch length.
    """
    if link['index'] >= 2**len(link['branch']):
        raise ValueError('index too large')
    current = tip_hash
    for i, sibling in enumerate(link['branch']):
        if (link['index'] >> i) & 1:
            current = hash256(merkle_record_type.pack(dict(left=sibling, right=current)))
        else:
            current = hash256(merkle_record_type.pack(dict(left=current, right=sibling)))
    return current
+
+# targets
def target_to_average_attempts(target):
    """Expected number of hash attempts to find a hash <= target."""
    assert 0 <= target and isinstance(target, (int, long)), target
    if target >= 2**256:
        warnings.warn('target >= 2**256!')
    return 2**256//(target + 1)
+
def average_attempts_to_target(average_attempts):
    """Inverse of target_to_average_attempts, clamped to the 256-bit range."""
    assert average_attempts > 0
    # round-to-nearest via +0.5, then clamp at the maximum possible target
    rounded = int(2**256/average_attempts - 1 + 0.5)
    return min(rounded, 2**256-1)
+
def target_to_difficulty(target):
    """Difficulty relative to the maximum target (0xffff0000 * 2**192)."""
    assert 0 <= target and isinstance(target, (int, long)), target
    if target >= 2**256:
        warnings.warn('target >= 2**256!')
    return (0xffff0000 * 2**(256-64) + 1)/(target + 1)
+
def difficulty_to_target(difficulty):
    """Inverse of target_to_difficulty; difficulty 0 maps to the max target."""
    assert difficulty >= 0
    if difficulty == 0:
        return 2**256-1
    # round-to-nearest, clamped to the 256-bit range
    return min(int((0xffff0000 * 2**(256-64) + 1)/difficulty - 1 + 0.5), 2**256-1)
+
+# human addresses
+
base58_alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

def base58_encode(bindata):
    """Base58-encode a byte string, mapping each leading zero byte to '1'."""
    stripped = bindata.lstrip(chr(0))
    pad = base58_alphabet[0] * (len(bindata) - len(stripped))
    return pad + math.natural_to_string(math.string_to_natural(stripped), base58_alphabet)
+
def base58_decode(b58data):
    """Inverse of base58_encode: leading '1' characters become zero bytes."""
    stripped = b58data.lstrip(base58_alphabet[0])
    pad = chr(0) * (len(b58data) - len(stripped))
    return pad + math.natural_to_string(math.string_to_natural(stripped, base58_alphabet))
+
# Base58Check payload: version byte + 160-bit pubkey hash; ChecksummedType
# appends/verifies the 4-byte double-SHA256 checksum
human_address_type = ChecksummedType(pack.ComposedType([
    ('version', pack.IntType(8)),
    ('pubkey_hash', pack.IntType(160)),
]))
+
def pubkey_hash_to_address(pubkey_hash, net):
    """Base58Check-encode a pubkey hash with the network's version byte."""
    payload = dict(version=net.ADDRESS_VERSION, pubkey_hash=pubkey_hash)
    return base58_encode(human_address_type.pack(payload))
+
def pubkey_to_address(pubkey, net):
    """Derive the base58 address for a serialized public key."""
    return pubkey_hash_to_address(hash160(pubkey), net)
+
def address_to_pubkey_hash(address, net):
    """Decode a base58 address and return its 160-bit pubkey hash.

    Raises ValueError on a bad checksum or when the version byte does not
    match net.ADDRESS_VERSION.
    """
    decoded = human_address_type.unpack(base58_decode(address))
    if decoded['version'] != net.ADDRESS_VERSION:
        raise ValueError('address not for this net!')
    return decoded['pubkey_hash']
+
def address_to_script(address, net):
    """Return the standard pay-to-pubkey-hash output script for an address.

    Raises ValueError on a bad checksum or wrong network.  Fixed: removed a
    leftover debug `print` of the pubkey hash, and delegated to the existing
    helpers instead of duplicating their decode/script-building logic.
    """
    return pubkey_hash_to_script2(address_to_pubkey_hash(address, net))
+
+
+# transactions
+
def pubkey_to_script2(pubkey):
    """Build a pay-to-pubkey script: push(pubkey) followed by OP_CHECKSIG."""
    assert len(pubkey) <= 75  # must fit in a single direct push opcode
    return chr(len(pubkey)) + pubkey + '\xac'
+
def pubkey_hash_to_script2(pubkey_hash):
    """Build a pay-to-pubkey-hash script:
    OP_DUP OP_HASH160 push20 <hash> OP_EQUALVERIFY OP_CHECKSIG."""
    return '\x76\xa9\x14' + pack.IntType(160).pack(pubkey_hash) + '\x88\xac'
+
def script2_to_address(script2, net):
    """Map an output script back to an address if it is a standard
    pay-to-pubkey or pay-to-pubkey-hash script; returns None otherwise.

    The try/except/else structure deliberately scopes the bare except to
    the parse step only — address construction happens outside it.
    """
    # pay-to-pubkey: push(pubkey) OP_CHECKSIG
    try:
        pubkey = script2[1:-1]
        matches = pubkey_to_script2(pubkey) == script2
    except:
        pass
    else:
        if matches:
            return pubkey_to_address(pubkey, net)

    # pay-to-pubkey-hash: OP_DUP OP_HASH160 push20 <hash> OP_EQUALVERIFY OP_CHECKSIG
    try:
        pubkey_hash = pack.IntType(160).unpack(script2[3:-2])
        matches = pubkey_hash_to_script2(pubkey_hash) == script2
    except:
        pass
    else:
        if matches:
            return pubkey_hash_to_address(pubkey_hash, net)
+
def script2_to_human(script2, net):
    """Describe an output script as a human-readable string, identifying
    standard pay-to-pubkey and pay-to-pubkey-hash forms."""
    try:
        pubkey = script2[1:-1]
        matches = pubkey_to_script2(pubkey) == script2
    except:
        pass
    else:
        if matches:
            return 'Pubkey. Address: %s' % (pubkey_to_address(pubkey, net),)

    try:
        pubkey_hash = pack.IntType(160).unpack(script2[3:-2])
        matches = pubkey_hash_to_script2(pubkey_hash) == script2
    except:
        pass
    else:
        if matches:
            return 'Address. Address: %s' % (pubkey_hash_to_address(pubkey_hash, net),)

    return 'Unknown. Script: %s' % (script2.encode('hex'),)