#!/usr/bin/env python
-# Copyright(C) 2011 thomasv@gitorious
+# Copyright(C) 2012 thomasv@gitorious
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
-import time, json, socket, operator, thread, ast, sys,re
+import time, json, socket, operator, thread, ast, sys, re, traceback
-import psycopg2, binascii
-from Abe.abe import hash_to_address, decode_check_address
-from Abe.DataStore import DataStore as Datastore_class
-from Abe import DataStore, readconf, BCDataStream, deserialize, util, base58
import ConfigParser
from json import dumps, loads
import urllib
-# we need to import electrum
-sys.path.append('../client/')
-from wallet import Wallet
-from interface import Interface
-
config = ConfigParser.ConfigParser()
# set some defaults, which will be overwritten by the config file
config.add_section('server')
config.set('server','banner', 'Welcome to Electrum!')
config.set('server', 'host', 'localhost')
-config.set('server', 'port', 50000)
+config.set('server', 'port', '50000')
config.set('server', 'password', '')
config.set('server', 'irc', 'yes')
config.set('server', 'ircname', 'Electrum server')
except:
pass
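+# A config file can override any of the defaults above; a minimal example,
+# with keys taken from the config.set calls (the file itself is read in the
+# try block above):
+#
+#   [server]
+#   host = localhost
+#   port = 50000
+#   password = secret
+#   banner = Welcome to Electrum!
+#   irc = yes
+#   ircname = Electrum server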
+
password = config.get('server','password')
-bitcoind_url = 'http://%s:%s@%s:%s/' % ( config.get('bitcoind','user'), config.get('bitcoind','password'), config.get('bitcoind','host'), config.get('bitcoind','port'))
stopping = False
block_number = -1
sessions = {}
sessions_sub_numblocks = {} # sessions that have subscribed to numblocks notifications
-dblock = thread.allocate_lock()
+m_sessions = [{}] # sessions served over http; one-element list so all threads share a single mutable reference
+
peer_list = {}
wallets = {} # for ultra-light clients such as bccapi
output_queue = Queue()
address_queue = Queue()
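+# output_queue carries (session_id, json_string) pairs produced by
+# send_status and friends, queued for delivery on the owning session's socket;
+# address_queue is fed by the backend when a transaction touches a watched
+# address and is drained by the update loop at the bottom of this file.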
-class MyStore(Datastore_class):
-
- def import_block(self, b, chain_ids=frozenset()):
- block_id = super(MyStore, self).import_block(b, chain_ids)
- #print "block", block_id
- for pos in xrange(len(b['transactions'])):
- tx = b['transactions'][pos]
- if 'hash' not in tx:
- tx['hash'] = util.double_sha256(tx['tx'])
- tx_id = store.tx_find_id_and_value(tx)
- if tx_id:
- self.update_tx_cache(tx_id)
- else:
- print "error: import_block: no tx_id"
- return block_id
-
-
- def update_tx_cache(self, txid):
- inrows = self.get_tx_inputs(txid, False)
- for row in inrows:
- _hash = store.binout(row[6])
- address = hash_to_address(chr(0), _hash)
- if self.tx_cache.has_key(address):
- print "cache: invalidating", address
- self.tx_cache.pop(address)
- address_queue.put(address)
-
- outrows = self.get_tx_outputs(txid, False)
- for row in outrows:
- _hash = store.binout(row[6])
- address = hash_to_address(chr(0), _hash)
- if self.tx_cache.has_key(address):
- print "cache: invalidating", address
- self.tx_cache.pop(address)
- address_queue.put(address)
-
- def safe_sql(self,sql, params=(), lock=True):
- try:
- if lock: dblock.acquire()
- ret = self.selectall(sql,params)
- if lock: dblock.release()
- return ret
- except:
- print "sql error", sql
- return []
-
- def get_tx_outputs(self, tx_id, lock=True):
- return self.safe_sql("""SELECT
- txout.txout_pos,
- txout.txout_scriptPubKey,
- txout.txout_value,
- nexttx.tx_hash,
- nexttx.tx_id,
- txin.txin_pos,
- pubkey.pubkey_hash
- FROM txout
- LEFT JOIN txin ON (txin.txout_id = txout.txout_id)
- LEFT JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
- LEFT JOIN tx nexttx ON (txin.tx_id = nexttx.tx_id)
- WHERE txout.tx_id = %d
- ORDER BY txout.txout_pos
- """%(tx_id), (), lock)
-
- def get_tx_inputs(self, tx_id, lock=True):
- return self.safe_sql(""" SELECT
- txin.txin_pos,
- txin.txin_scriptSig,
- txout.txout_value,
- COALESCE(prevtx.tx_hash, u.txout_tx_hash),
- prevtx.tx_id,
- COALESCE(txout.txout_pos, u.txout_pos),
- pubkey.pubkey_hash
- FROM txin
- LEFT JOIN txout ON (txout.txout_id = txin.txout_id)
- LEFT JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
- LEFT JOIN tx prevtx ON (txout.tx_id = prevtx.tx_id)
- LEFT JOIN unlinked_txin u ON (u.txin_id = txin.txin_id)
- WHERE txin.tx_id = %d
- ORDER BY txin.txin_pos
- """%(tx_id,), (), lock)
-
- def get_address_out_rows(self, dbhash):
- return self.safe_sql(""" SELECT
- b.block_nTime,
- cc.chain_id,
- b.block_height,
- 1,
- b.block_hash,
- tx.tx_hash,
- tx.tx_id,
- txin.txin_pos,
- -prevout.txout_value
- FROM chain_candidate cc
- JOIN block b ON (b.block_id = cc.block_id)
- JOIN block_tx ON (block_tx.block_id = b.block_id)
- JOIN tx ON (tx.tx_id = block_tx.tx_id)
- JOIN txin ON (txin.tx_id = tx.tx_id)
- JOIN txout prevout ON (txin.txout_id = prevout.txout_id)
- JOIN pubkey ON (pubkey.pubkey_id = prevout.pubkey_id)
- WHERE pubkey.pubkey_hash = ?
- AND cc.in_longest = 1""", (dbhash,))
-
- def get_address_out_rows_memorypool(self, dbhash):
- return self.safe_sql(""" SELECT
- 1,
- tx.tx_hash,
- tx.tx_id,
- txin.txin_pos,
- -prevout.txout_value
- FROM tx
- JOIN txin ON (txin.tx_id = tx.tx_id)
- JOIN txout prevout ON (txin.txout_id = prevout.txout_id)
- JOIN pubkey ON (pubkey.pubkey_id = prevout.pubkey_id)
- WHERE pubkey.pubkey_hash = ? """, (dbhash,))
-
- def get_address_in_rows(self, dbhash):
- return self.safe_sql(""" SELECT
- b.block_nTime,
- cc.chain_id,
- b.block_height,
- 0,
- b.block_hash,
- tx.tx_hash,
- tx.tx_id,
- txout.txout_pos,
- txout.txout_value
- FROM chain_candidate cc
- JOIN block b ON (b.block_id = cc.block_id)
- JOIN block_tx ON (block_tx.block_id = b.block_id)
- JOIN tx ON (tx.tx_id = block_tx.tx_id)
- JOIN txout ON (txout.tx_id = tx.tx_id)
- JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
- WHERE pubkey.pubkey_hash = ?
- AND cc.in_longest = 1""", (dbhash,))
-
- def get_address_in_rows_memorypool(self, dbhash):
- return self.safe_sql( """ SELECT
- 0,
- tx.tx_hash,
- tx.tx_id,
- txout.txout_pos,
- txout.txout_value
- FROM tx
- JOIN txout ON (txout.tx_id = tx.tx_id)
- JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
- WHERE pubkey.pubkey_hash = ? """, (dbhash,))
-
- def get_history(self, addr):
-
- cached_version = self.tx_cache.get( addr )
- if cached_version is not None:
- return cached_version
-
- version, binaddr = decode_check_address(addr)
- if binaddr is None:
- return None
- dbhash = self.binin(binaddr)
- rows = []
- rows += self.get_address_out_rows( dbhash )
- rows += self.get_address_in_rows( dbhash )
- txpoints = []
- known_tx = []
- for row in rows:
- try:
- nTime, chain_id, height, is_in, blk_hash, tx_hash, tx_id, pos, value = row
- except:
- print "cannot unpack row", row
- break
- tx_hash = self.hashout_hex(tx_hash)
- txpoint = {
- "nTime": int(nTime),
- "height": int(height),
- "is_in": int(is_in),
- "blk_hash": self.hashout_hex(blk_hash),
- "tx_hash": tx_hash,
- "tx_id": int(tx_id),
- "pos": int(pos),
- "value": int(value),
- }
-
- txpoints.append(txpoint)
- known_tx.append(self.hashout_hex(tx_hash))
-
-
- # todo: sort them really...
- txpoints = sorted(txpoints, key=operator.itemgetter("nTime"))
-
- # read memory pool
- rows = []
- rows += self.get_address_in_rows_memorypool( dbhash )
- rows += self.get_address_out_rows_memorypool( dbhash )
- address_has_mempool = False
-
- for row in rows:
- is_in, tx_hash, tx_id, pos, value = row
- tx_hash = self.hashout_hex(tx_hash)
- if tx_hash in known_tx:
- continue
-
- # this means that pending transactions were added to the db, even if they are not returned by getmemorypool
- address_has_mempool = True
-
- # this means pending transactions are returned by getmemorypool
- if tx_hash not in self.mempool_keys:
- continue
-
- #print "mempool", tx_hash
- txpoint = {
- "nTime": 0,
- "height": 0,
- "is_in": int(is_in),
- "blk_hash": 'mempool',
- "tx_hash": tx_hash,
- "tx_id": int(tx_id),
- "pos": int(pos),
- "value": int(value),
- }
- txpoints.append(txpoint)
-
-
- for txpoint in txpoints:
- tx_id = txpoint['tx_id']
-
- txinputs = []
- inrows = self.get_tx_inputs(tx_id)
- for row in inrows:
- _hash = self.binout(row[6])
- address = hash_to_address(chr(0), _hash)
- txinputs.append(address)
- txpoint['inputs'] = txinputs
- txoutputs = []
- outrows = self.get_tx_outputs(tx_id)
- for row in outrows:
- _hash = self.binout(row[6])
- address = hash_to_address(chr(0), _hash)
- txoutputs.append(address)
- txpoint['outputs'] = txoutputs
-
- # for all unspent inputs, I want their scriptpubkey. (actually I could deduce it from the address)
- if not txpoint['is_in']:
- # detect if already redeemed...
- for row in outrows:
- if row[6] == dbhash: break
- else:
- raise
- #row = self.get_tx_output(tx_id,dbhash)
- # pos, script, value, o_hash, o_id, o_pos, binaddr = row
- # if not redeemed, we add the script
- if row:
- if not row[4]: txpoint['raw_scriptPubKey'] = row[1]
-
- # cache result
- if not address_has_mempool:
- self.tx_cache[addr] = txpoints
-
- return txpoints
-
-
-
-class Direct_Interface(Interface):
- def __init__(self):
- pass
-
- def handler(self, method, params = ''):
- cmds = {'session.new':new_session,
- 'session.poll':poll_session,
- 'session.update':update_session,
- 'transaction.broadcast':send_tx,
- 'address.get_history':store.get_history
- }
- func = cmds[method]
- return func( params )
-
-
-
-def send_tx(tx):
- postdata = dumps({"method": 'importtransaction', 'params': [tx], 'id':'jsonrpc'})
- respdata = urllib.urlopen(bitcoind_url, postdata).read()
- r = loads(respdata)
- if r['error'] != None:
- out = "error: transaction rejected by memorypool\n"+tx
- else:
- out = r['result']
- return out
-def cmd_stop(data):
+def cmd_stop(_,__,pw):
global stopping
- if password == data:
+ if password == pw:
stopping = True
return 'ok'
else:
return 'wrong password'
-def cmd_load(pw):
+def cmd_load(_,__,pw):
if password == pw:
return repr( len(sessions) )
else:
return 'wrong password'
-def clear_cache(pw):
- if password == pw:
- store.tx_cache = {}
- return 'ok'
- else:
- return 'wrong password'
-def get_cache(pw,addr):
- if password == pw:
- return store.tx_cache.get(addr)
- else:
- return 'wrong password'
-def poll_session(session_id):
- session = sessions.get(session_id)
- if session is None:
- print time.asctime(), "session not found", session_id
- out = repr( (-1, {}))
- else:
+def modified_addresses(session):
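+    # Returns (ret, addresses): ret maps each address whose status changed
+    # since the last poll to its new status; addresses is the refreshed
+    # address -> (message_id, status) map the caller stores back in the session.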
+    if 1: # dummy block: keeps the indentation of the code moved here from poll_session
t1 = time.time()
addresses = session['addresses']
session['last_time'] = time.time()
ret = {}
k = 0
for addr in addresses:
- if store.tx_cache.get( addr ) is not None: k += 1
status = get_address_status( addr )
- last_status = addresses.get( addr )
+ msg_id, last_status = addresses.get( addr )
if last_status != status:
- addresses[addr] = status
+ addresses[addr] = msg_id, status
ret[addr] = status
- if ret:
- sessions[session_id]['addresses'] = addresses
- out = repr( (block_number, ret ) )
+
t2 = time.time() - t1
- if t2 > 10:
- print "high load:", session_id, "%d/%d"%(k,len(addresses)), t2
+ #if t2 > 10: print "high load:", session_id, "%d/%d"%(k,len(addresses)), t2
+ return ret, addresses
+
+
+def poll_session(session_id):
+ # native
+ session = sessions.get(session_id)
+ if session is None:
+ print time.asctime(), "session not found", session_id
+        return repr( (-1, {}) )
+ else:
+ ret, addresses = modified_addresses(session)
+ if ret: sessions[session_id]['addresses'] = addresses
+ return repr( (block_number,ret))
+
+
+def poll_session_json(session_id, message_id):
+ session = m_sessions[0].get(session_id)
+ if session is None:
+        raise Exception("session not found %s"%session_id)
+ else:
+ out = []
+ ret, addresses = modified_addresses(session)
+ if ret:
+ m_sessions[0][session_id]['addresses'] = addresses
+ for addr in ret:
+ msg_id, status = addresses[addr]
+ out.append( { 'id':msg_id, 'result':status } )
+
+ msg_id, last_nb = session.get('numblocks')
+ if last_nb:
+ if last_nb != block_number:
+ m_sessions[0][session_id]['numblocks'] = msg_id, block_number
+ out.append( {'id':msg_id, 'result':block_number} )
return out
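+# Illustration of the reply format built above: a poll that finds one changed
+# address and a new block returns something like
+#   [ {'id': 5, 'result': '<new address status>'},
+#     {'id': 2, 'result': 170000} ]
+# where each 'id' echoes the message_id recorded at subscription time.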
def do_update_address(addr):
    # an address was involved in a transaction: check whether it is subscribed to in a session.
    # an address can be subscribed to in several sessions; the cache ensures that we do not make redundant requests
+
for session_id in sessions.keys():
session = sessions[session_id]
if session.get('type') != 'persistent': continue
send_status(session_id,message_id,addr,status)
sessions[session_id]['addresses'][addr] = (message_id,status)
-
def get_address_status(addr):
# get address status, i.e. the last block for that address.
tx_points = store.get_history(addr)
out = json.dumps( { 'id':message_id, 'result':status } )
output_queue.put((session_id, out))
+def address_get_history_json(_,message_id,address):
+ return store.get_history(address)
+
def subscribe_to_numblocks(session_id, message_id):
sessions_sub_numblocks[session_id] = message_id
send_numblocks(session_id)
+def subscribe_to_numblocks_json(session_id, message_id):
+ global m_sessions
+ m_sessions[0][session_id]['numblocks'] = message_id,block_number
+ return block_number
+
def subscribe_to_address(session_id, message_id, address):
status = get_address_status(address)
sessions[session_id]['addresses'][address] = (message_id, status)
sessions[session_id]['last_time'] = time.time()
send_status(session_id, message_id, address, status)
+def add_address_to_session_json(session_id, message_id, address):
+ global m_sessions
+    sessions = m_sessions[0]  # local alias; shadows the global native-protocol sessions dict
+ status = get_address_status(address)
+ sessions[session_id]['addresses'][address] = (message_id, status)
+ sessions[session_id]['last_time'] = time.time()
+ m_sessions[0] = sessions
+ return status
+
def add_address_to_session(session_id, address):
status = get_address_status(address)
-    sessions[session_id]['addresses'][addr] = status
+    sessions[session_id]['addresses'][address] = ("", status)
sessions[session_id]['last_time'] = time.time()
return status
session_id = random_string(10)
sessions[session_id] = { 'addresses':{}, 'version':version }
for a in addresses:
- sessions[session_id]['addresses'][a] = ''
+ sessions[session_id]['addresses'][a] = ('','')
out = repr( (session_id, config.get('server','banner').replace('\\n','\n') ) )
sessions[session_id]['last_time'] = time.time()
return out
+
+def client_version_json(session_id, _, version):
+ global m_sessions
+ sessions = m_sessions[0]
+ sessions[session_id]['version'] = version
+ m_sessions[0] = sessions
+
+def create_session_json(_, __):
+ sessions = m_sessions[0]
+ session_id = random_string(10)
+ print "creating session", session_id
+ sessions[session_id] = { 'addresses':{}, 'numblocks':('','') }
+ sessions[session_id]['last_time'] = time.time()
+ m_sessions[0] = sessions
+ return session_id
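+# A freshly created http session thus looks like
+#   { 'addresses': {}, 'numblocks': ('',''), 'last_time': <now> }
+# where 'addresses' will map address -> (message_id, status), as in
+# add_address_to_session_json above.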
+
+
+def get_banner(_,__):
+ return config.get('server','banner').replace('\\n','\n')
+
def update_session(session_id,addresses):
+ """deprecated in 0.42"""
sessions[session_id]['addresses'] = {}
for a in addresses:
-        sessions[session_id]['addresses'][a] = ''
+        sessions[session_id]['addresses'][a] = ('','')
try:
session_id, addr = ast.literal_eval(data)
except:
- print "error"
+ traceback.print_exc(file=sys.stdout)
return None
- return add_address_to_session(session_id,addr)
+ out = add_address_to_session(session_id,addr)
elif cmd=='update_session':
try:
session_id, addresses = ast.literal_eval(data)
except:
- print "error"
+ traceback.print_exc(file=sys.stdout)
return None
print timestr(), "update session", ipaddr, addresses[0] if addresses else addresses, len(addresses)
out = update_session(session_id,addresses)
-
- elif cmd == 'bccapi_login':
- import electrum
- print "data",data
- v, k = ast.literal_eval(data)
- master_public_key = k.decode('hex') # todo: sanitize. no need to decode twice...
- print master_public_key
- wallet_id = random_string(10)
- w = Wallet( Direct_Interface() )
- w.master_public_key = master_public_key.decode('hex')
- w.synchronize()
- wallets[wallet_id] = w
- out = wallet_id
- print "wallets", wallets
-
- elif cmd == 'bccapi_getAccountInfo':
- from wallet import int_to_hex
- v, wallet_id = ast.literal_eval(data)
- w = wallets.get(wallet_id)
- if w is not None:
- num = len(w.addresses)
- c, u = w.get_balance()
- out = int_to_hex(num,4) + int_to_hex(c,8) + int_to_hex( c+u, 8 )
- out = out.decode('hex')
- else:
- print "error",data
- out = "error"
-
- elif cmd == 'bccapi_getAccountStatement':
- from wallet import int_to_hex
- v, wallet_id = ast.literal_eval(data)
- w = wallets.get(wallet_id)
- if w is not None:
- num = len(w.addresses)
- c, u = w.get_balance()
- total_records = num_records = 0
- out = int_to_hex(num,4) + int_to_hex(c,8) + int_to_hex( c+u, 8 ) + int_to_hex( total_records ) + int_to_hex( num_records )
- out = out.decode('hex')
- else:
- print "error",data
- out = "error"
-
- elif cmd == 'bccapi_getSendCoinForm':
- out = ''
-
- elif cmd == 'bccapi_submitTransaction':
- out = ''
elif cmd=='poll':
out = poll_session(data)
out = repr( store.get_history( address ) )
elif cmd == 'load':
- out = cmd_load(data)
+ out = cmd_load(None,None,data)
elif cmd =='tx':
- out = send_tx(data)
+ out = store.send_tx(data)
print timestr(), "sent tx:", ipaddr, out
elif cmd == 'stop':
def close_session(session_id):
- print "lost connection", session_id
+ #print "lost connection", session_id
sessions.pop(session_id)
if session_id in sessions_sub_numblocks:
sessions_sub_numblocks.pop(session_id)
####################################################################
-def memorypool_update(store):
-
- ds = BCDataStream.BCDataStream()
- previous_transactions = store.mempool_keys
- store.mempool_keys = []
-
- postdata = dumps({"method": 'getmemorypool', 'params': [], 'id':'jsonrpc'})
- respdata = urllib.urlopen(bitcoind_url, postdata).read()
- r = loads(respdata)
- if r['error'] != None:
- return
-
- v = r['result'].get('transactions')
- for hextx in v:
- ds.clear()
- ds.write(hextx.decode('hex'))
- tx = deserialize.parse_Transaction(ds)
- tx['hash'] = util.double_sha256(tx['tx'])
- tx_hash = store.hashin(tx['hash'])
-
- store.mempool_keys.append(tx_hash)
- if store.tx_find_id_and_value(tx):
- pass
- else:
- tx_id = store.import_tx(tx, False)
- store.update_tx_cache(tx_id)
-
- store.commit()
def clean_session_thread():
s.close()
+def get_peers_json(_,__):
+ return peer_list.values()
-def http_server_thread(store):
+def http_server_thread():
# see http://code.google.com/p/jsonrpclib/
from SocketServer import ThreadingMixIn
- from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
- class SimpleThreadedJSONRPCServer(ThreadingMixIn, SimpleJSONRPCServer): pass
- server = SimpleThreadedJSONRPCServer(( config.get('server','host'), 8081))
- server.register_function(lambda : peer_list.values(), 'peers')
+ from StratumJSONRPCServer import StratumJSONRPCServer
+ class StratumThreadedJSONRPCServer(ThreadingMixIn, StratumJSONRPCServer): pass
+ server = StratumThreadedJSONRPCServer(( config.get('server','host'), 8081))
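+    # The Stratum server passes (session_id, message_id) as the first two
+    # arguments of every registered handler; handlers that do not need them
+    # take (_, __) placeholders, and the native dispatcher likewise calls
+    # cmd_load/cmd_stop with (None, None, pw).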
+ server.register_function(get_peers_json, 'server.peers')
server.register_function(cmd_stop, 'stop')
server.register_function(cmd_load, 'load')
- server.register_function(lambda : block_number, 'blocks')
- server.register_function(clear_cache, 'clear_cache')
- server.register_function(get_cache, 'get_cache')
- server.register_function(send_tx, 'transaction.broadcast')
- server.register_function(store.get_history, 'address.get_history')
- server.register_function(add_address_to_session, 'address.subscribe')
- server.register_function(new_session, 'session.new')
- server.register_function(update_session, 'session.update')
- server.register_function(poll_session, 'session.poll')
+ server.register_function(get_banner, 'server.banner')
+ server.register_function(lambda a,b,c: store.send_tx(c), 'transaction.broadcast')
+ server.register_function(address_get_history_json, 'address.get_history')
+ server.register_function(add_address_to_session_json, 'address.subscribe')
+ server.register_function(subscribe_to_numblocks_json, 'numblocks.subscribe')
+ server.register_function(client_version_json, 'client.version')
+ server.register_function(create_session_json, 'session.create') # internal message (not part of protocol)
+ server.register_function(poll_session_json, 'session.poll') # internal message (not part of protocol)
server.serve_forever()
if cmd == 'load':
out = server.load(password)
elif cmd == 'peers':
- out = server.peers()
+ out = server.server.peers()
elif cmd == 'stop':
out = server.stop(password)
elif cmd == 'clear_cache':
elif cmd == 'tx':
out = server.transaction.broadcast(sys.argv[2])
elif cmd == 'b':
- out = server.blocks()
+ out = server.numblocks.subscribe()
else:
out = "Unknown command: '%s'" % cmd
print out
sys.exit(0)
- print "starting Electrum server"
-
- conf = DataStore.CONFIG_DEFAULTS
- args, argv = readconf.parse_argv( [], conf)
- args.dbtype= config.get('database','type')
- if args.dbtype == 'sqlite3':
- args.connect_args = { 'database' : config.get('database','database') }
- elif args.dbtype == 'MySQLdb':
- args.connect_args = { 'db' : config.get('database','database'), 'user' : config.get('database','username'), 'passwd' : config.get('database','password') }
- elif args.dbtype == 'psycopg2':
- args.connect_args = { 'database' : config.get('database','database') }
- store = MyStore(args)
- store.tx_cache = {}
- store.mempool_keys = {}
+ # backend
+ import db
+    store = db.MyStore(config, address_queue)
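+    # db.MyStore replaces the MyStore class deleted above: it wraps the Abe
+    # datastore, the per-address transaction cache and memory-pool tracking,
+    # and posts invalidated addresses on address_queue.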
# supported protocols
thread.start_new_thread(native_server_thread, ())
thread.start_new_thread(tcp_server_thread, ())
- thread.start_new_thread(http_server_thread, (store,))
-
+ thread.start_new_thread(http_server_thread, ())
thread.start_new_thread(clean_session_thread, ())
if (config.get('server','irc') == 'yes' ):
thread.start_new_thread(irc_thread, ())
+ print "starting Electrum server"
+
+
while not stopping:
- try:
- dblock.acquire()
- store.catch_up()
- memorypool_update(store)
-
- block_number = store.get_block_number(1)
- if block_number != old_block_number:
- old_block_number = block_number
- for session_id in sessions_sub_numblocks.keys():
- send_numblocks(session_id)
-
- except IOError:
- print "IOError: cannot reach bitcoind"
- block_number = 0
- except:
- traceback.print_exc(file=sys.stdout)
- block_number = 0
- finally:
- dblock.release()
+ block_number = store.main_iteration()
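+    # main_iteration() replaces the inline catch-up code deleted above: it
+    # syncs the store with bitcoind, refreshes the memory pool and returns
+    # the current block height.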
+ if block_number != old_block_number:
+ old_block_number = block_number
+ for session_id in sessions_sub_numblocks.keys():
+ send_numblocks(session_id)
# do addresses
while True:
try:
do_update_address(addr)
time.sleep(10)
-
print "server stopped"