X-Git-Url: https://git.novaco.in/?a=blobdiff_plain;f=server.py;h=6ab05128ecb57176e5f797180d75f7d011676514;hb=70f23254388eb74ca7a05009ccf2a5d7a723ec36;hp=13e4d051f854a9b8cda64a2ab4a55654bc49de01;hpb=8e72b45fdc843543778862a8e37a5c1cd26151e1;p=electrum-server.git
diff --git a/server.py b/server.py
index 13e4d05..6ab0512 100755
--- a/server.py
+++ b/server.py
@@ -24,349 +24,8 @@ Todo: mempool transactions do not need to be added to the database; it slows
 it down
 """
 
+import abe_backend
 
-from Abe.abe import hash_to_address, decode_check_address
-from Abe.DataStore import DataStore as Datastore_class
-from Abe import DataStore, readconf, BCDataStream, deserialize, util, base58
-
-import psycopg2, binascii
-
-import thread, traceback, sys, urllib, operator
-from json import dumps, loads
-
-
-class MyStore(Datastore_class):
-
-    def __init__(self, config, address_queue):
-        conf = DataStore.CONFIG_DEFAULTS
-        args, argv = readconf.parse_argv( [], conf)
-        args.dbtype = config.get('database','type')
-        if args.dbtype == 'sqlite3':
-            args.connect_args = { 'database' : config.get('database','database') }
-        elif args.dbtype == 'MySQLdb':
-            args.connect_args = { 'db' : config.get('database','database'), 'user' : config.get('database','username'), 'passwd' : config.get('database','password') }
-        elif args.dbtype == 'psycopg2':
-            args.connect_args = { 'database' : config.get('database','database') }
-
-        Datastore_class.__init__(self,args)
-
-        self.tx_cache = {}
-        self.mempool_keys = {}
-        self.bitcoind_url = 'http://%s:%s@%s:%s/' % ( config.get('bitcoind','user'), config.get('bitcoind','password'), config.get('bitcoind','host'), config.get('bitcoind','port'))
-
-        self.address_queue = address_queue
-        self.dblock = thread.allocate_lock()
-
-
-
-    def import_block(self, b, chain_ids=frozenset()):
-        block_id = super(MyStore, self).import_block(b, chain_ids)
-        for pos in xrange(len(b['transactions'])):
-            tx = b['transactions'][pos]
-            if 'hash' not in tx:
-                tx['hash'] = util.double_sha256(tx['tx'])
-            tx_id = store.tx_find_id_and_value(tx)
-            if tx_id:
-                self.update_tx_cache(tx_id)
-            else:
-                print "error: import_block: no tx_id"
-        return block_id
-
-
-    def update_tx_cache(self, txid):
-        inrows = self.get_tx_inputs(txid, False)
-        for row in inrows:
-            _hash = self.binout(row[6])
-            address = hash_to_address(chr(0), _hash)
-            if self.tx_cache.has_key(address):
-                print "cache: invalidating", address
-                self.tx_cache.pop(address)
-            self.address_queue.put(address)
-
-        outrows = self.get_tx_outputs(txid, False)
-        for row in outrows:
-            _hash = self.binout(row[6])
-            address = hash_to_address(chr(0), _hash)
-            if self.tx_cache.has_key(address):
-                print "cache: invalidating", address
-                self.tx_cache.pop(address)
-            self.address_queue.put(address)
-
-    def safe_sql(self,sql, params=(), lock=True):
-        try:
-            if lock: self.dblock.acquire()
-            ret = self.selectall(sql,params)
-            if lock: self.dblock.release()
-            return ret
-        except:
-            print "sql error", sql
-            return []
-
-    def get_tx_outputs(self, tx_id, lock=True):
-        return self.safe_sql("""SELECT
-                txout.txout_pos,
-                txout.txout_scriptPubKey,
-                txout.txout_value,
-                nexttx.tx_hash,
-                nexttx.tx_id,
-                txin.txin_pos,
-                pubkey.pubkey_hash
-              FROM txout
-              LEFT JOIN txin ON (txin.txout_id = txout.txout_id)
-              LEFT JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
-              LEFT JOIN tx nexttx ON (txin.tx_id = nexttx.tx_id)
-             WHERE txout.tx_id = %d
-             ORDER BY txout.txout_pos
-        """%(tx_id), (), lock)
-
-    def get_tx_inputs(self, tx_id, lock=True):
-        return self.safe_sql(""" SELECT
-                txin.txin_pos,
-                txin.txin_scriptSig,
-                txout.txout_value,
-                COALESCE(prevtx.tx_hash, u.txout_tx_hash),
-                prevtx.tx_id,
-                COALESCE(txout.txout_pos, u.txout_pos),
-                pubkey.pubkey_hash
-            FROM txin
-            LEFT JOIN txout ON (txout.txout_id = txin.txout_id)
-            LEFT JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
-            LEFT JOIN tx prevtx ON (txout.tx_id = prevtx.tx_id)
-            LEFT JOIN unlinked_txin u ON (u.txin_id = txin.txin_id)
-            WHERE txin.tx_id = %d
-            ORDER BY txin.txin_pos
-            """%(tx_id,), (), lock)
-
-    def get_address_out_rows(self, dbhash):
-        return self.safe_sql(""" SELECT
-                b.block_nTime,
-                cc.chain_id,
-                b.block_height,
-                1,
-                b.block_hash,
-                tx.tx_hash,
-                tx.tx_id,
-                txin.txin_pos,
-                -prevout.txout_value
-              FROM chain_candidate cc
-              JOIN block b ON (b.block_id = cc.block_id)
-              JOIN block_tx ON (block_tx.block_id = b.block_id)
-              JOIN tx ON (tx.tx_id = block_tx.tx_id)
-              JOIN txin ON (txin.tx_id = tx.tx_id)
-              JOIN txout prevout ON (txin.txout_id = prevout.txout_id)
-              JOIN pubkey ON (pubkey.pubkey_id = prevout.pubkey_id)
-             WHERE pubkey.pubkey_hash = ?
-               AND cc.in_longest = 1""", (dbhash,))
-
-    def get_address_out_rows_memorypool(self, dbhash):
-        return self.safe_sql(""" SELECT
-                1,
-                tx.tx_hash,
-                tx.tx_id,
-                txin.txin_pos,
-                -prevout.txout_value
-              FROM tx
-              JOIN txin ON (txin.tx_id = tx.tx_id)
-              JOIN txout prevout ON (txin.txout_id = prevout.txout_id)
-              JOIN pubkey ON (pubkey.pubkey_id = prevout.pubkey_id)
-             WHERE pubkey.pubkey_hash = ? """, (dbhash,))
-
-    def get_address_in_rows(self, dbhash):
-        return self.safe_sql(""" SELECT
-                b.block_nTime,
-                cc.chain_id,
-                b.block_height,
-                0,
-                b.block_hash,
-                tx.tx_hash,
-                tx.tx_id,
-                txout.txout_pos,
-                txout.txout_value
-              FROM chain_candidate cc
-              JOIN block b ON (b.block_id = cc.block_id)
-              JOIN block_tx ON (block_tx.block_id = b.block_id)
-              JOIN tx ON (tx.tx_id = block_tx.tx_id)
-              JOIN txout ON (txout.tx_id = tx.tx_id)
-              JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
-             WHERE pubkey.pubkey_hash = ?
-               AND cc.in_longest = 1""", (dbhash,))
-
-    def get_address_in_rows_memorypool(self, dbhash):
-        return self.safe_sql( """ SELECT
-                0,
-                tx.tx_hash,
-                tx.tx_id,
-                txout.txout_pos,
-                txout.txout_value
-              FROM tx
-              JOIN txout ON (txout.tx_id = tx.tx_id)
-              JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
-             WHERE pubkey.pubkey_hash = ? """, (dbhash,))
-
-    def get_history(self, addr):
-
-        cached_version = self.tx_cache.get( addr )
-        if cached_version is not None:
-            return cached_version
-
-        version, binaddr = decode_check_address(addr)
-        if binaddr is None:
-            return None
-
-        dbhash = self.binin(binaddr)
-        rows = []
-        rows += self.get_address_out_rows( dbhash )
-        rows += self.get_address_in_rows( dbhash )
-
-        txpoints = []
-        known_tx = []
-
-        for row in rows:
-            try:
-                nTime, chain_id, height, is_in, blk_hash, tx_hash, tx_id, pos, value = row
-            except:
-                print "cannot unpack row", row
-                break
-            tx_hash = self.hashout_hex(tx_hash)
-            txpoint = {
-                    "nTime":    int(nTime),
-                    "height":   int(height),
-                    "is_in":    int(is_in),
-                    "blk_hash": self.hashout_hex(blk_hash),
-                    "tx_hash":  tx_hash,
-                    "tx_id":    int(tx_id),
-                    "pos":      int(pos),
-                    "value":    int(value),
-                    }
-
-            txpoints.append(txpoint)
-            known_tx.append(self.hashout_hex(tx_hash))
-
-
-        # todo: sort them really...
-        txpoints = sorted(txpoints, key=operator.itemgetter("nTime"))
-
-        # read memory pool
-        rows = []
-        rows += self.get_address_in_rows_memorypool( dbhash )
-        rows += self.get_address_out_rows_memorypool( dbhash )
-        address_has_mempool = False
-
-        for row in rows:
-            is_in, tx_hash, tx_id, pos, value = row
-            tx_hash = self.hashout_hex(tx_hash)
-            if tx_hash in known_tx:
-                continue
-
-            # this means that pending transactions were added to the db, even if they are not returned by getmemorypool
-            address_has_mempool = True
-
-            # this means pending transactions are returned by getmemorypool
-            if tx_hash not in self.mempool_keys:
-                continue
-
-            #print "mempool", tx_hash
-            txpoint = {
-                    "nTime":    0,
-                    "height":   0,
-                    "is_in":    int(is_in),
-                    "blk_hash": 'mempool',
-                    "tx_hash":  tx_hash,
-                    "tx_id":    int(tx_id),
-                    "pos":      int(pos),
-                    "value":    int(value),
-                    }
-            txpoints.append(txpoint)
-
-
-        for txpoint in txpoints:
-            tx_id = txpoint['tx_id']
-
-            txinputs = []
-            inrows = self.get_tx_inputs(tx_id)
-            for row in inrows:
-                _hash = self.binout(row[6])
-                address = hash_to_address(chr(0), _hash)
-                txinputs.append(address)
-            txpoint['inputs'] = txinputs
-            txoutputs = []
-            outrows = self.get_tx_outputs(tx_id)
-            for row in outrows:
-                _hash = self.binout(row[6])
-                address = hash_to_address(chr(0), _hash)
-                txoutputs.append(address)
-            txpoint['outputs'] = txoutputs
-
-            # for all unspent inputs, I want their scriptpubkey. (actually I could deduce it from the address)
-            if not txpoint['is_in']:
-                # detect if already redeemed...
-                for row in outrows:
-                    if row[6] == dbhash: break
-                else:
-                    raise
-                #row = self.get_tx_output(tx_id,dbhash)
-                # pos, script, value, o_hash, o_id, o_pos, binaddr = row
-                # if not redeemed, we add the script
-                if row:
-                    if not row[4]: txpoint['raw_scriptPubKey'] = row[1]
-
-        # cache result
-        if not address_has_mempool:
-            self.tx_cache[addr] = txpoints
-
-        return txpoints
-
-
-
-    def memorypool_update(store):
-
-        ds = BCDataStream.BCDataStream()
-        previous_transactions = store.mempool_keys
-        store.mempool_keys = []
-
-        postdata = dumps({"method": 'getmemorypool', 'params': [], 'id':'jsonrpc'})
-
-        respdata = urllib.urlopen(store.bitcoind_url, postdata).read()
-        r = loads(respdata)
-        if r['error'] != None:
-            return
-
-        v = r['result'].get('transactions')
-        for hextx in v:
-            ds.clear()
-            ds.write(hextx.decode('hex'))
-            tx = deserialize.parse_Transaction(ds)
-            tx['hash'] = util.double_sha256(tx['tx'])
-            tx_hash = store.hashin(tx['hash'])
-
-    def send_tx(self,tx):
-        postdata = dumps({"method": 'importtransaction', 'params': [tx], 'id':'jsonrpc'})
-        respdata = urllib.urlopen(self.bitcoind_url, postdata).read()
-        r = loads(respdata)
-        if r['error'] != None:
-            out = "error: transaction rejected by memorypool\n"+tx
-        else:
-            out = r['result']
-        return out
-
-
-    def main_iteration(store):
-        try:
-            store.dblock.acquire()
-            store.catch_up()
-            store.memorypool_update()
-            block_number = store.get_block_number(1)
-
-        except IOError:
-            print "IOError: cannot reach bitcoind"
-            block_number = 0
-        except:
-            traceback.print_exc(file=sys.stdout)
-            block_number = 0
-        finally:
-            store.dblock.release()
-
-        return block_number
@@ -374,7 +33,7 @@ import time, json, socket, operator, thread, ast, sys, re, traceback
 import ConfigParser
 from json import dumps, loads
 import urllib
-
+import threading
 
 config = ConfigParser.ConfigParser()
 # set some defaults, which will be overwritten by the config file
@@ -405,26 +64,8 @@ except:
 password = config.get('server','password')
 
-
 stopping = False
-block_number = -1
 sessions = {}
-sessions_sub_numblocks = {} # sessions that have subscribed to the service
-
-m_sessions = [{}] # served by http
-
-peer_list = {}
-
-wallets = {} # for ultra-light clients such as bccapi
-
-from Queue import Queue
-input_queue = Queue()
-output_queue = Queue()
-address_queue = Queue()
-
-
-
-
@@ -432,43 +73,26 @@ def random_string(N):
     import random, string
     return ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(N))
 
-
-
-def cmd_stop(_,__,pw):
-    global stopping
-    if password == pw:
-        stopping = True
-        return 'ok'
-    else:
-        return 'wrong password'
-
-def cmd_load(_,__,pw):
-    if password == pw:
-        return repr( len(sessions) )
-    else:
-        return 'wrong password'
-
-
-
-def modified_addresses(session):
-    if 1:
-        t1 = time.time()
-        addresses = session['addresses']
-        session['last_time'] = time.time()
-        ret = {}
-        k = 0
-        for addr in addresses:
-            status = get_address_status( addr )
-            msg_id, last_status = addresses.get( addr )
-            if last_status != status:
-                addresses[addr] = msg_id, status
-                ret[addr] = status
+def modified_addresses(a_session):
+    #t1 = time.time()
+    import copy
+    session = copy.deepcopy(a_session)
+    addresses = session['addresses']
+    session['last_time'] = time.time()
+    ret = {}
+    k = 0
+    for addr in addresses:
+        status = store.get_status( addr )
+        msg_id, last_status = addresses.get( addr )
+        if last_status != status:
+            addresses[addr] = msg_id, status
+            ret[addr] = status
 
-        t2 = time.time() - t1
-        #if t2 > 10: print "high load:", session_id, "%d/%d"%(k,len(addresses)), t2
-        return ret, addresses
+    #t2 = time.time() - t1
+    #if t2 > 10: print "high load:", session_id, "%d/%d"%(k,len(addresses)), t2
+    return ret, addresses
 
 
 def poll_session(session_id):
@@ -478,106 +102,19 @@ def poll_session(session_id):
         print time.asctime(), "session not found", session_id
         return -1, {}
     else:
+        sessions[session_id]['last_time'] = time.time()
         ret, addresses = modified_addresses(session)
         if ret: sessions[session_id]['addresses'] = addresses
-        return repr( (block_number,ret))
-
-
-def poll_session_json(session_id, message_id):
-    session = m_sessions[0].get(session_id)
-    if session is None:
-        raise BaseException("session not found %s"%session_id)
-    else:
-        out = []
-        ret, addresses = modified_addresses(session)
-        if ret:
-            m_sessions[0][session_id]['addresses'] = addresses
-            for addr in ret:
-                msg_id, status = addresses[addr]
-                out.append( { 'id':msg_id, 'result':status } )
-
-        msg_id, last_nb = session.get('numblocks')
-        if last_nb:
-            if last_nb != block_number:
-                m_sessions[0][session_id]['numblocks'] = msg_id, block_number
-                out.append( {'id':msg_id, 'result':block_number} )
-
-        return out
-
-
-def do_update_address(addr):
-    # an address was involved in a transaction; we check if it was subscribed to in a session
-    # the address can be subscribed in several sessions; the cache should ensure that we don't do redundant requests
-
-    for session_id in sessions.keys():
-        session = sessions[session_id]
-        if session.get('type') != 'persistent': continue
-        addresses = session['addresses'].keys()
-
-        if addr in addresses:
-            status = get_address_status( addr )
-            message_id, last_status = session['addresses'][addr]
-            if last_status != status:
-                #print "sending new status for %s:"%addr, status
-                send_status(session_id,message_id,addr,status)
-                sessions[session_id]['addresses'][addr] = (message_id,status)
-
-def get_address_status(addr):
-    # get address status, i.e. the last block for that address.
-    tx_points = store.get_history(addr)
-    if not tx_points:
-        status = None
-    else:
-        lastpoint = tx_points[-1]
-        status = lastpoint['blk_hash']
-        # this is a temporary hack; move it up once old clients have disappeared
-        if status == 'mempool': # and session['version'] != "old":
-            status = status + ':%d'% len(tx_points)
-    return status
-
-
-def send_numblocks(session_id):
-    message_id = sessions_sub_numblocks[session_id]
-    out = json.dumps( {'id':message_id, 'result':block_number} )
-    output_queue.put((session_id, out))
-
-def send_status(session_id, message_id, address, status):
-    out = json.dumps( { 'id':message_id, 'result':status } )
-    output_queue.put((session_id, out))
-
-def address_get_history_json(_,message_id,address):
-    return store.get_history(address)
-
-def subscribe_to_numblocks(session_id, message_id):
-    sessions_sub_numblocks[session_id] = message_id
-    send_numblocks(session_id)
-
-def subscribe_to_numblocks_json(session_id, message_id):
-    global m_sessions
-    m_sessions[0][session_id]['numblocks'] = message_id,block_number
-    return block_number
-
-def subscribe_to_address(session_id, message_id, address):
-    status = get_address_status(address)
-    sessions[session_id]['addresses'][address] = (message_id, status)
-    sessions[session_id]['last_time'] = time.time()
-    send_status(session_id, message_id, address, status)
+        return repr( (store.block_number,ret))
 
-def add_address_to_session_json(session_id, message_id, address):
-    global m_sessions
-    sessions = m_sessions[0]
-    status = get_address_status(address)
-    sessions[session_id]['addresses'][address] = (message_id, status)
-    sessions[session_id]['last_time'] = time.time()
-    m_sessions[0] = sessions
-    return status
 
 def add_address_to_session(session_id, address):
-    status = get_address_status(address)
+    status = store.get_status(address)
     sessions[session_id]['addresses'][address] = ("", status)
     sessions[session_id]['last_time'] = time.time()
     return status
 
+
 def new_session(version, addresses):
     session_id = random_string(10)
     sessions[session_id] = { 'addresses':{}, 'version':version }
@@ -588,34 +125,15 @@ def new_session(version, addresses):
     return out
 
-def client_version_json(session_id, _, version):
-    global m_sessions
-    sessions = m_sessions[0]
-    sessions[session_id]['version'] = version
-    m_sessions[0] = sessions
-
-def create_session_json(_, __):
-    sessions = m_sessions[0]
-    session_id = random_string(10)
-    print "creating session", session_id
-    sessions[session_id] = { 'addresses':{}, 'numblocks':('','') }
-    sessions[session_id]['last_time'] = time.time()
-    m_sessions[0] = sessions
-    return session_id
-
-
-
-def get_banner(_,__):
-    return config.get('server','banner').replace('\\n','\n')
-
 def update_session(session_id,addresses):
-    """deprecated in 0.42"""
+    """deprecated in 0.42, was replaced by add_address_to_session"""
     sessions[session_id]['addresses'] = {}
     for a in addresses:
         sessions[session_id]['addresses'][a] = ''
     sessions[session_id]['last_time'] = time.time()
     return 'ok'
 
+
 def native_server_thread():
     s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
@@ -707,22 +225,15 @@ def do_command(cmd, data, ipaddr):
         out = poll_session(data)
 
     elif cmd == 'h':
-        # history
         address = data
         out = repr( store.get_history( address ) )
 
-    elif cmd == 'load':
-        out = cmd_load(None,None,data)
-
     elif cmd =='tx':
         out = store.send_tx(data)
        print timestr(), "sent tx:", ipaddr, out
 
-    elif cmd == 'stop':
-        out = cmd_stop(data)
-
     elif cmd == 'peers':
-        out = repr(peer_list.values())
+        out = repr(irc.get_peers())
 
     else:
         out = None
@@ -730,130 +241,63 @@ def do_command(cmd, data, ipaddr):
     return out
 
-
-####################################################################
-
-def tcp_server_thread():
-    thread.start_new_thread(process_input_queue, ())
-    thread.start_new_thread(process_output_queue, ())
-
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-    s.bind((config.get('server','host'), 50001))
-    s.listen(1)
+def clean_session_thread():
     while not stopping:
-        conn, addr = s.accept()
-        try:
-            thread.start_new_thread(tcp_client_thread, (addr, conn,))
-        except:
-            # can't start new thread if there is no memory..
-            traceback.print_exc(file=sys.stdout)
-
-
-def close_session(session_id):
-    #print "lost connection", session_id
-    sessions.pop(session_id)
-    if session_id in sessions_sub_numblocks:
-        sessions_sub_numblocks.pop(session_id)
-
-
-# one thread per client. put requests in a queue.
-def tcp_client_thread(ipaddr,conn):
-    """ use a persistent connection. put commands in a queue."""
-
-    print timestr(), "TCP session", ipaddr
-    global sessions
+        time.sleep(30)
+        t = time.time()
+        for k,s in sessions.items():
+            if s.get('type') == 'persistent': continue
+            t0 = s['last_time']
+            if t - t0 > 5*60:
+                sessions.pop(k)
+                print "lost session", k
+
-    session_id = random_string(10)
-    sessions[session_id] = { 'conn':conn, 'addresses':{}, 'version':'unknown', 'type':'persistent' }
+####################################################################
 
-    ipaddr = ipaddr[0]
-    msg = ''
-
-    while not stopping:
-        try:
-            d = conn.recv(1024)
-        except socket.error:
-            d = ''
-        if not d:
-            close_session(session_id)
-            break
-
-        msg += d
-        while True:
-            s = msg.find('\n')
-            if s ==-1:
-                break
-            else:
-                c = msg[0:s].strip()
-                msg = msg[s+1:]
-                if c == 'quit':
-                    conn.close()
-                    close_session(session_id)
-                    return
-                try:
-                    c = json.loads(c)
-                except:
-                    print "json error", repr(c)
-                    continue
-                try:
-                    message_id = c.get('id')
-                    method = c.get('method')
-                    params = c.get('params')
-                except:
-                    print "syntax error", repr(c), ipaddr
-                    continue
-
-                # add to queue
-                input_queue.put((session_id, message_id, method, params))
-
-
-
-# read commands from the input queue. perform requests, etc. this should be called from the main thread.
-def process_input_queue():
-    while not stopping:
-        session_id, message_id, method, data = input_queue.get()
-        if session_id not in sessions.keys():
-            continue
-        out = None
-        if method == 'address.subscribe':
-            address = data[0]
-            subscribe_to_address(session_id,message_id,address)
-        elif method == 'numblocks.subscribe':
-            subscribe_to_numblocks(session_id,message_id)
+from processor import Shared, Processor, Dispatcher
+from stratum_http import HttpServer
+from stratum import TcpServer
+
+class AbeProcessor(Processor):
+    def process(self,request):
+        message_id = request['id']
+        method = request['method']
+        params = request.get('params',[])
+        #print request
+
+        result = ''
+        if method == 'numblocks.subscribe':
+            result = store.block_number
+        elif method == 'address.subscribe':
+            address = params[0]
+            store.watch_address(address)
+            status = store.get_status(address)
+            result = status
         elif method == 'client.version':
-            sessions[session_id]['version'] = data[0]
+            #session.version = params[0]
+            pass
         elif method == 'server.banner':
-            out = { 'result':config.get('server','banner').replace('\\n','\n') }
+            result = config.get('server','banner').replace('\\n','\n')
         elif method == 'server.peers':
-            out = { 'result':peer_list.values() }
+            result = irc.get_peers()
         elif method == 'address.get_history':
-            address = data[0]
-            out = { 'result':store.get_history( address ) }
+            address = params[0]
+            result = store.get_history( address )
         elif method == 'transaction.broadcast':
-            postdata = dumps({"method": 'importtransaction', 'params': [data], 'id':'jsonrpc'})
-            txo = urllib.urlopen(bitcoind_url, postdata).read()
+            txo = store.send_tx(params[0])
             print "sent tx:", txo
-            out = json.loads(txo)
+            result = txo
         else:
-            print "unknown command", method
-        if out:
-            out['id'] = message_id
-            out = json.dumps( out )
-            output_queue.put((session_id, out))
+            print "unknown method", request
 
-# this is a separate thread
-def process_output_queue():
-    while not stopping:
-        session_id, out = output_queue.get()
-        session = sessions.get(session_id)
-        if session:
-            try:
-                conn = session.get('conn')
-                conn.send(out+'\n')
-            except:
-                close_session(session_id)
-
+        if result!='':
+            response = { 'id':message_id, 'method':method, 'params':params, 'result':result }
+            self.push_response(response)
+
+    def get_status(self,addr):
+        return store.get_status(addr)
@@ -861,82 +305,59 @@ def process_output_queue():
 
+class Irc(threading.Thread):
+
+    def __init__(self, processor):
+        self.processor = processor
+        threading.Thread.__init__(self)
+        self.daemon = True
+        self.peers = {}
+
+    def get_peers(self):
+        return self.peers.values()
+
+    def run(self):
+        NICK = 'E_'+random_string(10)
+        while not self.processor.shared.stopped():
+            try:
+                s = socket.socket()
+                s.connect(('irc.freenode.net', 6667))
+                s.send('USER electrum 0 * :'+config.get('server','host')+' '+config.get('server','ircname')+'\n')
+                s.send('NICK '+NICK+'\n')
+                s.send('JOIN #electrum\n')
+                sf = s.makefile('r', 0)
+                t = 0
+                while not self.processor.shared.stopped():
+                    line = sf.readline()
+                    line = line.rstrip('\r\n')
+                    line = line.split()
+                    if line[0]=='PING':
+                        s.send('PONG '+line[1]+'\n')
+                    elif '353' in line: # answer to /names
+                        k = line.index('353')
+                        for item in line[k+1:]:
+                            if item[0:2] == 'E_':
+                                s.send('WHO %s\n'%item)
+                    elif '352' in line: # answer to /who
+                        # warning: this is a horrible hack which apparently works
+                        k = line.index('352')
+                        ip = line[k+4]
+                        ip = socket.gethostbyname(ip)
+                        name = line[k+6]
+                        host = line[k+9]
+                        self.peers[name] = (ip,host)
+                    if time.time() - t > 5*60:
+                        self.processor.push_response({'method':'server.peers', 'result':[self.get_peers()]})
+                        s.send('NAMES #electrum\n')
+                        t = time.time()
+                        self.peers = {}
+            except:
+                traceback.print_exc(file=sys.stdout)
+            finally:
+                sf.close()
+                s.close()
 
-def clean_session_thread():
-    while not stopping:
-        time.sleep(30)
-        t = time.time()
-        for k,s in sessions.items():
-            if s.get('type') == 'persistent': continue
-            t0 = s['last_time']
-            if t - t0 > 5*60:
-                sessions.pop(k)
-                print "lost session", k
-
-def irc_thread():
-    global peer_list
-    NICK = 'E_'+random_string(10)
-    while not stopping:
-        try:
-            s = socket.socket()
-            s.connect(('irc.freenode.net', 6667))
-            s.send('USER electrum 0 * :'+config.get('server','host')+' '+config.get('server','ircname')+'\n')
-            s.send('NICK '+NICK+'\n')
-            s.send('JOIN #electrum\n')
-            sf = s.makefile('r', 0)
-            t = 0
-            while not stopping:
-                line = sf.readline()
-                line = line.rstrip('\r\n')
-                line = line.split()
-                if line[0]=='PING':
-                    s.send('PONG '+line[1]+'\n')
-                elif '353' in line: # answer to /names
-                    k = line.index('353')
-                    for item in line[k+1:]:
-                        if item[0:2] == 'E_':
-                            s.send('WHO %s\n'%item)
-                elif '352' in line: # answer to /who
-                    # warning: this is a horrible hack which apparently works
-                    k = line.index('352')
-                    ip = line[k+4]
-                    ip = socket.gethostbyname(ip)
-                    name = line[k+6]
-                    host = line[k+9]
-                    peer_list[name] = (ip,host)
-                if time.time() - t > 5*60:
-                    s.send('NAMES #electrum\n')
-                    t = time.time()
-                    peer_list = {}
-        except:
-            traceback.print_exc(file=sys.stdout)
-        finally:
-            sf.close()
-            s.close()
-
-
-def get_peers_json(_,__):
-    return peer_list.values()
-
-def http_server_thread():
-    # see http://code.google.com/p/jsonrpclib/
-    from SocketServer import ThreadingMixIn
-    from StratumJSONRPCServer import StratumJSONRPCServer
-    class StratumThreadedJSONRPCServer(ThreadingMixIn, StratumJSONRPCServer): pass
-    server = StratumThreadedJSONRPCServer(( config.get('server','host'), 8081))
-    server.register_function(get_peers_json, 'server.peers')
-    server.register_function(cmd_stop, 'stop')
-    server.register_function(cmd_load, 'load')
-    server.register_function(get_banner, 'server.banner')
-    server.register_function(lambda a,b,c: store.send_tx(c), 'transaction.broadcast')
-    server.register_function(address_get_history_json, 'address.get_history')
-    server.register_function(add_address_to_session_json, 'address.subscribe')
-    server.register_function(subscribe_to_numblocks_json, 'numblocks.subscribe')
-    server.register_function(client_version_json, 'client.version')
-    server.register_function(create_session_json, 'session.create')    # internal message (not part of protocol)
-    server.register_function(poll_session_json, 'session.poll')    # internal message (not part of protocol)
-    server.serve_forever()
 
 
 if __name__ == '__main__':
@@ -967,35 +388,34 @@ if __name__ == '__main__':
         sys.exit(0)
 
     # backend
-    # from db import MyStore
-    store = MyStore(config,address_queue)
+    store = abe_backend.AbeStore(config)
 
-    # supported protocols
+    # old protocol
     thread.start_new_thread(native_server_thread, ())
-    thread.start_new_thread(tcp_server_thread, ())
-    thread.start_new_thread(http_server_thread, ())
     thread.start_new_thread(clean_session_thread, ())
 
-    if (config.get('server','irc') == 'yes' ):
-        thread.start_new_thread(irc_thread, ())
+    processor = AbeProcessor()
+    shared = Shared()
+    # Bind shared to processor since constructor is user defined
+    processor.shared = shared
+    processor.start()
+    # dispatcher
+    dispatcher = Dispatcher(shared, processor)
+    dispatcher.start()
+    # Create various transports we need
+    transports = [ TcpServer(shared, processor, "ecdsa.org",50001),
+                   HttpServer(shared, processor, "ecdsa.org",8081)
+                 ]
+    for server in transports:
+        server.start()
 
-    print "starting Electrum server"
-    old_block_number = None
-    while not stopping:
-        block_number = store.main_iteration()
+    if (config.get('server','irc') == 'yes' ):
+        irc = Irc(processor)
+        irc.start()
 
-        if block_number != old_block_number:
-            old_block_number = block_number
-            for session_id in sessions_sub_numblocks.keys():
-                send_numblocks(session_id)
-        while True:
-            try:
-                addr = address_queue.get(False)
-            except:
-                break
-            do_update_address(addr)
-        time.sleep(10)
+    print "starting Electrum server"
+    store.run(processor)
 
     print "server stopped"
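
Usage sketch (not part of the patch): AbeProcessor.process() dispatches on request['method'] and answers through push_response() with the request's 'id' and a 'result'. The client below exercises the new server. It assumes the stratum TcpServer keeps the newline-delimited JSON framing of the tcp_client_thread it replaces, that the server listens locally on port 50001, and that the sample address is only a placeholder.

import socket, json

def send_request(sock, request_id, method, params):
    # one JSON object per line, matching the '\n'-split reader that
    # the removed tcp_client_thread used
    sock.send(json.dumps({'id': request_id, 'method': method, 'params': params}) + '\n')

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('localhost', 50001))    # assumed host/port, cf. TcpServer(...) above

send_request(s, 0, 'server.banner', [])
send_request(s, 1, 'numblocks.subscribe', [])
send_request(s, 2, 'address.get_history', ['1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa'])  # placeholder address

buf = ''
replies = 0
while replies < 3:
    chunk = s.recv(1024)
    if not chunk:
        break
    buf += chunk
    while '\n' in buf:
        line, buf = buf.split('\n', 1)
        if line.strip():
            print json.loads(line)    # e.g. {'id': 0, 'method': 'server.banner', 'params': [], 'result': ...}
            replies += 1
s.close()

Subscriptions can also push unsolicited responses later (for example 'server.peers' from the Irc thread), so a long-lived client should keep reading rather than stop after a fixed count.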
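The ConfigParser sections the server reads never appear in this file; the sketch below reconstructs them purely from the config.get() calls visible in the diff. All values are placeholders, and the output file name is hypothetical.

import ConfigParser

config = ConfigParser.ConfigParser()

config.add_section('server')
config.set('server', 'host', 'localhost')              # also sent in the IRC USER line
config.set('server', 'ircname', 'my electrum server')
config.set('server', 'banner', 'Welcome!')
config.set('server', 'password', 'secret')             # still read near the top of server.py
config.set('server', 'irc', 'yes')                     # 'yes' starts the Irc thread

config.add_section('bitcoind')                         # JSON-RPC credentials used by the backend
config.set('bitcoind', 'user', 'rpcuser')
config.set('bitcoind', 'password', 'rpcpassword')
config.set('bitcoind', 'host', 'localhost')
config.set('bitcoind', 'port', '8332')

config.add_section('database')                         # sqlite3, MySQLdb or psycopg2
config.set('database', 'type', 'psycopg2')
config.set('database', 'database', 'abe')              # MySQLdb also reads username/password

with open('electrum.conf', 'w') as f:                  # hypothetical file name
    config.write(f)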