new workflow
index 0d0a63a..1c9a409 100755 (executable)
--- a/server.py
+++ b/server.py
@@ -24,360 +24,8 @@ Todo:
 mempool transactions do not need to be added to the database; doing so slows it down
 """
 
+import abe_backend
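+
+# abe_backend.AbeStore (not shown in this diff) is expected to provide
+# roughly the following interface, judging from the call sites below:
+#   block_number        - height of the best block seen so far
+#   get_status(addr)    - hash of the last block affecting addr, or None
+#   get_history(addr)   - list of txpoints for addr
+#   watch_address(addr) - register addr so status changes can be pushed
+#   send_tx(tx)         - forward a raw transaction to bitcoind
+#   run(processor)      - blocking main loop: block catch-up + mempool polling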
 
-from Abe.abe import hash_to_address, decode_check_address
-from Abe.DataStore import DataStore as Datastore_class
-from Abe import DataStore, readconf, BCDataStream,  deserialize, util, base58
-
-import psycopg2, binascii
-
-import thread, traceback, sys, urllib, operator
-from json import dumps, loads
-
-
-class MyStore(Datastore_class):
-
-    def __init__(self, config):
-        conf = DataStore.CONFIG_DEFAULTS
-        args, argv = readconf.parse_argv( [], conf)
-        args.dbtype = config.get('database','type')
-        if args.dbtype == 'sqlite3':
-            args.connect_args = { 'database' : config.get('database','database') }
-        elif args.dbtype == 'MySQLdb':
-            args.connect_args = { 'db' : config.get('database','database'), 'user' : config.get('database','username'), 'passwd' : config.get('database','password') }
-        elif args.dbtype == 'psycopg2':
-            args.connect_args = { 'database' : config.get('database','database') }
-
-        Datastore_class.__init__(self,args)
-
-        self.tx_cache = {}
-        self.mempool_keys = []
-        self.bitcoind_url = 'http://%s:%s@%s:%s/' % ( config.get('bitcoind','user'), config.get('bitcoind','password'), config.get('bitcoind','host'), config.get('bitcoind','port'))
-
-        self.address_queue = Queue()
-
-        self.dblock = thread.allocate_lock()
-
-
-
-    def import_block(self, b, chain_ids=frozenset()):
-        block_id = super(MyStore, self).import_block(b, chain_ids)
-        for pos in xrange(len(b['transactions'])):
-            tx = b['transactions'][pos]
-            if 'hash' not in tx:
-                tx['hash'] = util.double_sha256(tx['tx'])
-            tx_id = self.tx_find_id_and_value(tx)
-            if tx_id:
-                self.update_tx_cache(tx_id)
-            else:
-                print "error: import_block: no tx_id"
-        return block_id
-
-
-    def update_tx_cache(self, txid):
-        inrows = self.get_tx_inputs(txid, False)
-        for row in inrows:
-            _hash = self.binout(row[6])
-            address = hash_to_address(chr(0), _hash)
-            if self.tx_cache.has_key(address):
-                print "cache: invalidating", address
-                self.tx_cache.pop(address)
-            self.address_queue.put(address)
-
-        outrows = self.get_tx_outputs(txid, False)
-        for row in outrows:
-            _hash = self.binout(row[6])
-            address = hash_to_address(chr(0), _hash)
-            if self.tx_cache.has_key(address):
-                print "cache: invalidating", address
-                self.tx_cache.pop(address)
-            self.address_queue.put(address)
-
-    def safe_sql(self,sql, params=(), lock=True):
-        try:
-            if lock: self.dblock.acquire()
-            ret = self.selectall(sql,params)
-            if lock: self.dblock.release()
-            return ret
-        except:
-            # release the lock on failure too, or the next caller deadlocks
-            if lock: self.dblock.release()
-            print "sql error", sql
-            return []
-
-    def get_tx_outputs(self, tx_id, lock=True):
-        return self.safe_sql("""SELECT
-                txout.txout_pos,
-                txout.txout_scriptPubKey,
-                txout.txout_value,
-                nexttx.tx_hash,
-                nexttx.tx_id,
-                txin.txin_pos,
-                pubkey.pubkey_hash
-              FROM txout
-              LEFT JOIN txin ON (txin.txout_id = txout.txout_id)
-              LEFT JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
-              LEFT JOIN tx nexttx ON (txin.tx_id = nexttx.tx_id)
-             WHERE txout.tx_id = %d 
-             ORDER BY txout.txout_pos
-        """%(tx_id), (), lock)
-
-    def get_tx_inputs(self, tx_id, lock=True):
-        return self.safe_sql(""" SELECT
-                txin.txin_pos,
-                txin.txin_scriptSig,
-                txout.txout_value,
-                COALESCE(prevtx.tx_hash, u.txout_tx_hash),
-                prevtx.tx_id,
-                COALESCE(txout.txout_pos, u.txout_pos),
-                pubkey.pubkey_hash
-              FROM txin
-              LEFT JOIN txout ON (txout.txout_id = txin.txout_id)
-              LEFT JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
-              LEFT JOIN tx prevtx ON (txout.tx_id = prevtx.tx_id)
-              LEFT JOIN unlinked_txin u ON (u.txin_id = txin.txin_id)
-             WHERE txin.tx_id = %d
-             ORDER BY txin.txin_pos
-             """%(tx_id,), (), lock)
-
-    def get_address_out_rows(self, dbhash):
-        return self.safe_sql(""" SELECT
-                b.block_nTime,
-                cc.chain_id,
-                b.block_height,
-                1,
-                b.block_hash,
-                tx.tx_hash,
-                tx.tx_id,
-                txin.txin_pos,
-                -prevout.txout_value
-              FROM chain_candidate cc
-              JOIN block b ON (b.block_id = cc.block_id)
-              JOIN block_tx ON (block_tx.block_id = b.block_id)
-              JOIN tx ON (tx.tx_id = block_tx.tx_id)
-              JOIN txin ON (txin.tx_id = tx.tx_id)
-              JOIN txout prevout ON (txin.txout_id = prevout.txout_id)
-              JOIN pubkey ON (pubkey.pubkey_id = prevout.pubkey_id)
-             WHERE pubkey.pubkey_hash = ?
-               AND cc.in_longest = 1""", (dbhash,))
-
-    def get_address_out_rows_memorypool(self, dbhash):
-        return self.safe_sql(""" SELECT
-                1,
-                tx.tx_hash,
-                tx.tx_id,
-                txin.txin_pos,
-                -prevout.txout_value
-              FROM tx 
-              JOIN txin ON (txin.tx_id = tx.tx_id)
-              JOIN txout prevout ON (txin.txout_id = prevout.txout_id)
-              JOIN pubkey ON (pubkey.pubkey_id = prevout.pubkey_id)
-             WHERE pubkey.pubkey_hash = ? """, (dbhash,))
-
-    def get_address_in_rows(self, dbhash):
-        return self.safe_sql(""" SELECT
-                b.block_nTime,
-                cc.chain_id,
-                b.block_height,
-                0,
-                b.block_hash,
-                tx.tx_hash,
-                tx.tx_id,
-                txout.txout_pos,
-                txout.txout_value
-              FROM chain_candidate cc
-              JOIN block b ON (b.block_id = cc.block_id)
-              JOIN block_tx ON (block_tx.block_id = b.block_id)
-              JOIN tx ON (tx.tx_id = block_tx.tx_id)
-              JOIN txout ON (txout.tx_id = tx.tx_id)
-              JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
-             WHERE pubkey.pubkey_hash = ?
-               AND cc.in_longest = 1""", (dbhash,))
-
-    def get_address_in_rows_memorypool(self, dbhash):
-        return self.safe_sql( """ SELECT
-                0,
-                tx.tx_hash,
-                tx.tx_id,
-                txout.txout_pos,
-                txout.txout_value
-              FROM tx
-              JOIN txout ON (txout.tx_id = tx.tx_id)
-              JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
-             WHERE pubkey.pubkey_hash = ? """, (dbhash,))
-
-    def get_history(self, addr):
-        
-        cached_version = self.tx_cache.get( addr )
-        if cached_version is not None:
-            return cached_version
-
-        version, binaddr = decode_check_address(addr)
-        if binaddr is None:
-            return None
-
-        dbhash = self.binin(binaddr)
-        rows = []
-        rows += self.get_address_out_rows( dbhash )
-        rows += self.get_address_in_rows( dbhash )
-
-        txpoints = []
-        known_tx = []
-
-        for row in rows:
-            try:
-                nTime, chain_id, height, is_in, blk_hash, tx_hash, tx_id, pos, value = row
-            except:
-                print "cannot unpack row", row
-                break
-            tx_hash = self.hashout_hex(tx_hash)
-            txpoint = {
-                    "nTime":    int(nTime),
-                    "height":   int(height),
-                    "is_in":    int(is_in),
-                    "blk_hash": self.hashout_hex(blk_hash),
-                    "tx_hash":  tx_hash,
-                    "tx_id":    int(tx_id),
-                    "pos":      int(pos),
-                    "value":    int(value),
-                    }
-
-            txpoints.append(txpoint)
-            known_tx.append(tx_hash)  # tx_hash was hex-encoded above
-
-
-        # todo: sort them really...
-        txpoints = sorted(txpoints, key=operator.itemgetter("nTime"))
-
-        # read memory pool
-        rows = []
-        rows += self.get_address_in_rows_memorypool( dbhash )
-        rows += self.get_address_out_rows_memorypool( dbhash )
-        address_has_mempool = False
-
-        for row in rows:
-            is_in, tx_hash, tx_id, pos, value = row
-            tx_hash = self.hashout_hex(tx_hash)
-            if tx_hash in known_tx:
-                continue
-
-            # the row exists in the db, so pending transactions were added to
-            # the db for this address, even ones getmemorypool no longer returns
-            address_has_mempool = True
-
-            # only report transactions that getmemorypool currently returns
-            if tx_hash not in self.mempool_keys:
-                continue
-
-            #print "mempool", tx_hash
-            txpoint = {
-                    "nTime":    0,
-                    "height":   0,
-                    "is_in":    int(is_in),
-                    "blk_hash": 'mempool', 
-                    "tx_hash":  tx_hash,
-                    "tx_id":    int(tx_id),
-                    "pos":      int(pos),
-                    "value":    int(value),
-                    }
-            txpoints.append(txpoint)
-
-
-        for txpoint in txpoints:
-            tx_id = txpoint['tx_id']
-            
-            txinputs = []
-            inrows = self.get_tx_inputs(tx_id)
-            for row in inrows:
-                _hash = self.binout(row[6])
-                address = hash_to_address(chr(0), _hash)
-                txinputs.append(address)
-            txpoint['inputs'] = txinputs
-            txoutputs = []
-            outrows = self.get_tx_outputs(tx_id)
-            for row in outrows:
-                _hash = self.binout(row[6])
-                address = hash_to_address(chr(0), _hash)
-                txoutputs.append(address)
-            txpoint['outputs'] = txoutputs
-
-            # for all unspent inputs, I want their scriptpubkey. (actually I could deduce it from the address)
-            if not txpoint['is_in']:
-                # detect if already redeemed...
-                for row in outrows:
-                    if row[6] == dbhash: break
-                else:
-                    raise Exception("address not found in tx outputs")
-                #row = self.get_tx_output(tx_id,dbhash)
-                # pos, script, value, o_hash, o_id, o_pos, binaddr = row
-                # if not redeemed, we add the script
-                if row:
-                    if not row[4]: txpoint['raw_scriptPubKey'] = row[1]
-
-        # cache result
-        if not address_has_mempool:
-            self.tx_cache[addr] = txpoints
-        
-        return txpoints
-
-
-
-    def memorypool_update(store):
-
-        ds = BCDataStream.BCDataStream()
-        previous_transactions = store.mempool_keys
-        store.mempool_keys = []
-
-        postdata = dumps({"method": 'getmemorypool', 'params': [], 'id':'jsonrpc'})
-
-        respdata = urllib.urlopen(store.bitcoind_url, postdata).read()
-        r = loads(respdata)
-        if r['error'] != None:
-            return
-
-        v = r['result'].get('transactions')
-        for hextx in v:
-            ds.clear()
-            ds.write(hextx.decode('hex'))
-            tx = deserialize.parse_Transaction(ds)
-            tx['hash'] = util.double_sha256(tx['tx'])
-            tx_hash = store.hashin(tx['hash'])
-
-            store.mempool_keys.append(tx_hash)
-            if store.tx_find_id_and_value(tx):
-                pass
-            else:
-                tx_id = store.import_tx(tx, False)
-                store.update_tx_cache(tx_id)
-    
-        store.commit()
-
-
-    def send_tx(self,tx):
-        postdata = dumps({"method": 'importtransaction', 'params': [tx], 'id':'jsonrpc'})
-        respdata = urllib.urlopen(self.bitcoind_url, postdata).read()
-        r = loads(respdata)
-        if r['error'] != None:
-            out = "error: transaction rejected by memorypool\n"+tx
-        else:
-            out = r['result']
-        return out
-
-
-    def main_iteration(store):
-        try:
-            store.dblock.acquire()
-            store.catch_up()
-            store.memorypool_update()
-            block_number = store.get_block_number(1)
-
-        except IOError:
-            print "IOError: cannot reach bitcoind"
-            block_number = 0
-        except:
-            traceback.print_exc(file=sys.stdout)
-            block_number = 0
-        finally:
-            store.dblock.release()
-
-        return block_number
 
 
 
@@ -418,16 +66,12 @@ except:
 password = config.get('server','password')
 
 stopping = False
-block_number = -1
 sessions = {}
-sessions_sub_numblocks = {} # sessions that have subscribed to the service
 
 m_sessions = [{}] # served by http
 
 peer_list = {}
 
-wallets = {} # for ultra-light clients such as bccapi
-
 from Queue import Queue
 input_queue = Queue()
 output_queue = Queue()
@@ -468,7 +112,7 @@ def modified_addresses(a_session):
     ret = {}
     k = 0
     for addr in addresses:
-        status = get_address_status( addr )
+        status = store.get_status( addr )
         msg_id, last_status = addresses.get( addr )
         if last_status != status:
             addresses[addr] = msg_id, status
@@ -486,9 +130,10 @@ def poll_session(session_id):
         print time.asctime(), "session not found", session_id
         return -1, {}
     else:
+        sessions[session_id]['last_time'] = time.time()
         ret, addresses = modified_addresses(session)
         if ret: sessions[session_id]['addresses'] = addresses
-        return repr( (block_number,ret))
+        return repr( (store.block_number,ret))
 
 
 def poll_session_json(session_id, message_id):
@@ -496,6 +141,7 @@ def poll_session_json(session_id, message_id):
     if session is None:
         raise BaseException("session not found %s"%session_id)
     else:
+        m_sessions[0][session_id]['last_time'] = time.time()
         out = []
         ret, addresses = modified_addresses(session)
         if ret: 
@@ -513,75 +159,27 @@ def poll_session_json(session_id, message_id):
         return out
 
 
-def do_update_address(addr):
-    # an address was involved in a transaction; we check if it was subscribed to in a session
-    # the address can be subscribed in several sessions; the cache should ensure that we don't do redundant requests
-
-    for session_id in sessions.keys():
-        session = sessions[session_id]
-        if session.get('type') != 'persistent': continue
-        addresses = session['addresses'].keys()
-
-        if addr in addresses:
-            status = get_address_status( addr )
-            message_id, last_status = session['addresses'][addr]
-            if last_status != status:
-                #print "sending new status for %s:"%addr, status
-                send_status(session_id,message_id,addr,status)
-                sessions[session_id]['addresses'][addr] = (message_id,status)
-
-def get_address_status(addr):
-    # get address status, i.e. the last block for that address.
-    tx_points = store.get_history(addr)
-    if not tx_points:
-        status = None
-    else:
-        lastpoint = tx_points[-1]
-        status = lastpoint['blk_hash']
-        # this is a temporary hack; move it up once old clients have disappeared
-        if status == 'mempool': # and session['version'] != "old":
-            status = status + ':%d'% len(tx_points)
-    return status
 
 
-def send_numblocks(session_id):
-    message_id = sessions_sub_numblocks[session_id]
-    out = json.dumps( {'id':message_id, 'result':block_number} )
-    output_queue.put((session_id, out))
-
-def send_status(session_id, message_id, address, status):
-    out = json.dumps( { 'id':message_id, 'result':status } )
-    output_queue.put((session_id, out))
-
 def address_get_history_json(_,message_id,address):
     return store.get_history(address)
 
-def subscribe_to_numblocks(session_id, message_id):
-    sessions_sub_numblocks[session_id] = message_id
-    send_numblocks(session_id)
-
 def subscribe_to_numblocks_json(session_id, message_id):
     global m_sessions
-    m_sessions[0][session_id]['numblocks'] = message_id,block_number
-    return block_number
+    m_sessions[0][session_id]['numblocks'] = message_id,store.block_number
+    return store.block_number
 
-def subscribe_to_address(session_id, message_id, address):
-    status = get_address_status(address)
-    sessions[session_id]['addresses'][address] = (message_id, status)
-    sessions[session_id]['last_time'] = time.time()
-    send_status(session_id, message_id, address, status)
-
 def add_address_to_session_json(session_id, message_id, address):
     global m_sessions
     sessions = m_sessions[0]
-    status = get_address_status(address)
+    status = store.get_status(address)
     sessions[session_id]['addresses'][address] = (message_id, status)
     sessions[session_id]['last_time'] = time.time()
     m_sessions[0] = sessions
     return status
 
 def add_address_to_session(session_id, address):
-    status = get_address_status(address)
+    status = store.get_status(address)
     sessions[session_id]['addresses'][address] = ("", status)
     sessions[session_id]['last_time'] = time.time()
     return status
@@ -602,22 +200,13 @@ def client_version_json(session_id, _, version):
     sessions[session_id]['version'] = version
     m_sessions[0] = sessions
 
-def create_session_json(_, __):
-    sessions = m_sessions[0]
-    session_id = random_string(10)
-    print "creating session", session_id
-    sessions[session_id] = { 'addresses':{}, 'numblocks':('','') }
-    sessions[session_id]['last_time'] = time.time()
-    m_sessions[0] = sessions
-    return session_id
-
 
 
 def get_banner(_,__):
     return config.get('server','banner').replace('\\n','\n')
 
 def update_session(session_id,addresses):
-    """deprecated in 0.42"""
+    """deprecated in 0.42, wad replaced by add_address_to_session"""
     sessions[session_id]['addresses'] = {}
     for a in addresses:
         sessions[session_id]['addresses'][a] = ''
@@ -738,129 +327,61 @@ def do_command(cmd, data, ipaddr):
     return out
 
 
-
-####################################################################
-
-def tcp_server_thread():
-    thread.start_new_thread(process_input_queue, ())
-    thread.start_new_thread(process_output_queue, ())
-
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-    s.bind((config.get('server','host'), 50001))
-    s.listen(1)
+def clean_session_thread():
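+    # expire sessions that have been idle for more than five minutes;
+    # sessions marked 'persistent' are never expired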
     while not stopping:
-        conn, addr = s.accept()
-        try:
-            thread.start_new_thread(tcp_client_thread, (addr, conn,))
-        except:
-            # can't start new thread if there is no memory..
-            traceback.print_exc(file=sys.stdout)
-
-
-def close_session(session_id):
-    #print "lost connection", session_id
-    sessions.pop(session_id)
-    if session_id in sessions_sub_numblocks:
-        sessions_sub_numblocks.pop(session_id)
-
+        time.sleep(30)
+        t = time.time()
+        for k,s in sessions.items():
+            if s.get('type') == 'persistent': continue
+            t0 = s['last_time']
+            if t - t0 > 5*60:
+                sessions.pop(k)
+                print "lost session", k
+            
 
-# one thread per client. put requests in a queue.
-def tcp_client_thread(ipaddr,conn):
-    """ use a persistent connection. put commands in a queue."""
+####################################################################
 
-    print timestr(), "TCP session", ipaddr
-    global sessions
 
-    session_id = random_string(10)
-    sessions[session_id] = { 'conn':conn, 'addresses':{}, 'version':'unknown', 'type':'persistent' }
+import stratum
 
-    ipaddr = ipaddr[0]
-    msg = ''
+class AbeProcessor(stratum.Processor):
+    def process(self,request):
+        message_id = request['id']
+        method = request['method']
+        params = request.get('params',[])
+        #print request
 
-    while not stopping:
-        try:
-            d = conn.recv(1024)
-        except socket.error:
-            d = ''
-        if not d:
-            close_session(session_id)
-            break
-
-        msg += d
-        while True:
-            s = msg.find('\n')
-            if s ==-1:
-                break
-            else:
-                c = msg[0:s].strip()
-                msg = msg[s+1:]
-                if c == 'quit': 
-                    conn.close()
-                    close_session(session_id)
-                    return
-                try:
-                    c = json.loads(c)
-                except:
-                    print "json error", repr(c)
-                    continue
-                try:
-                    message_id = c.get('id')
-                    method = c.get('method')
-                    params = c.get('params')
-                except:
-                    print "syntax error", repr(c), ipaddr
-                    continue
-
-                # add to queue
-                input_queue.put((session_id, message_id, method, params))
-
-
-
-# read commands from the input queue. perform requests, etc. this should be called from the main thread.
-def process_input_queue():
-    while not stopping:
-        session_id, message_id, method, data = input_queue.get()
-        if session_id not in sessions.keys():
-            continue
-        out = None
-        if method == 'address.subscribe':
-            address = data[0]
-            subscribe_to_address(session_id,message_id,address)
-        elif method == 'numblocks.subscribe':
-            subscribe_to_numblocks(session_id,message_id)
+        result = ''
+        if method == 'numblocks.subscribe':
+            result = store.block_number
+        elif method == 'address.subscribe':
+            address = params[0]
+            store.watch_address(address)
+            status = store.get_status(address)
+            result = status
         elif method == 'client.version':
-            sessions[session_id]['version'] = data[0]
+            #session.version = params[0]
+            pass
         elif method == 'server.banner':
-            out = { 'result':config.get('server','banner').replace('\\n','\n') } 
+            result = config.get('server','banner').replace('\\n','\n')
         elif method == 'server.peers':
-            out = { 'result':peer_list.values() } 
+            result = peer_list.values()
         elif method == 'address.get_history':
-            address = data[0]
-            out = { 'result':store.get_history( address ) } 
+            address = params[0]
+            result = store.get_history( address ) 
         elif method == 'transaction.broadcast':
-            txo = store.send_tx(data[0])
+            txo = store.send_tx(params[0])
             print "sent tx:", txo
-            out = {'result':txo }
+            result = txo 
         else:
-            print "unknown command", method
-        if out:
-            out['id'] = message_id
-            out = json.dumps( out )
-            output_queue.put((session_id, out))
+            print "unknown method", request
 
-# this is a separate thread
-def process_output_queue():
-    while not stopping:
-        session_id, out = output_queue.get()
-        session = sessions.get(session_id)
-        if session: 
-            try:
-                conn = session.get('conn')
-                conn.send(out+'\n')
-            except:
-                close_session(session_id)
-                
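+        # an empty result means the method produced nothing to send back
+        # (e.g. client.version); push a response only otherwise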
+        if result!='':
+            response = { 'id':message_id, 'method':method, 'params':params, 'result':result }
+            self.push_response(response)
+
+    def get_status(self,addr):
+        return store.get_status(addr)
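+
+    # For reference, a request/response pair through this processor looks
+    # roughly like this (address and result are illustrative):
+    #   --> {"id": 1, "method": "address.subscribe", "params": ["1A1zP1..."]}
+    #   <-- {"id": 1, "method": "address.subscribe", "params": ["1A1zP1..."],
+    #        "result": "<hash of the last block affecting that address>"}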
 
 
 
@@ -869,17 +390,6 @@ def process_output_queue():
 
 
 
-def clean_session_thread():
-    while not stopping:
-        time.sleep(30)
-        t = time.time()
-        for k,s in sessions.items():
-            if s.get('type') == 'persistent': continue
-            t0 = s['last_time']
-            if t - t0 > 5*60:
-                sessions.pop(k)
-                print "lost session", k
-            
 
 def irc_thread():
     global peer_list
@@ -974,35 +484,37 @@ if __name__ == '__main__':
         sys.exit(0)
 
     # backend
-    # from db import MyStore
-    store = MyStore(config)
+    store = abe_backend.AbeStore(config)
 
     # supported protocols
     thread.start_new_thread(native_server_thread, ())
-    thread.start_new_thread(tcp_server_thread, ())
-    thread.start_new_thread(http_server_thread, ())
     thread.start_new_thread(clean_session_thread, ())
 
-    if (config.get('server','irc') == 'yes' ):
-       thread.start_new_thread(irc_thread, ())
+    #thread.start_new_thread(http_server_thread, ())
 
-    print "starting Electrum server"
 
-    old_block_number = None
-    while not stopping:
-        block_number = store.main_iteration()
+    processor = AbeProcessor()
+    shared = stratum.Shared()
+    # Bind shared to processor since constructor is user defined
+    processor.shared = shared
+    processor.start()
 
-        if block_number != old_block_number:
-            old_block_number = block_number
-            for session_id in sessions_sub_numblocks.keys():
-                send_numblocks(session_id)
-        while True:
-            try:
-                addr = store.address_queue.get(False)
-            except:
-                break
-            do_update_address(addr)
+    # Create various transports we need
 
-        time.sleep(10)
+    #tcp stratum
+    tcpserver = stratum.TcpServer(shared, processor, "ecdsa.org",50001)
+    tcpserver.start()
+
+    #http stratum
+    from StratumJSONRPCServer import HttpServer
+    server = HttpServer(shared, processor, "ecdsa.org",8081)
+    server.start()
+
+
+    if (config.get('server','irc') == 'yes' ):
+       thread.start_new_thread(irc_thread, ())
+
+    print "starting Electrum server"
+    store.run(processor)
     print "server stopped"