2 # Copyright(C) 2011 thomasv@gitorious
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU Affero General Public License as
6 # published by the Free Software Foundation, either version 3 of the
7 # License, or (at your option) any later version.
9 # This program is distributed in the hope that it will be useful, but
10 # WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 # Affero General Public License for more details.
14 # You should have received a copy of the GNU Affero General Public
15 # License along with this program. If not, see
16 # <http://www.gnu.org/licenses/agpl.html>.
20 * server should check and return bitcoind status..
21 * improve txpoint sorting
22 * command to check cache
26 import time, socket, operator, thread, ast, sys,re
27 import psycopg2, binascii
30 from Abe.abe import hash_to_address, decode_check_address
31 from Abe.DataStore import DataStore as Datastore_class
32 from Abe import DataStore, readconf, BCDataStream, deserialize, util, base58
# Module-level configuration: defaults below are overwritten by
# /etc/electrum.conf when it is readable.
config = ConfigParser.ConfigParser()
# set some defaults, which will be overwritten by the config file
config.add_section('server')
config.set('server','banner', 'Welcome to Electrum!')
config.set('server', 'host', 'localhost')
config.set('server', 'port', 50000)
config.set('server', 'password', '')
config.set('server', 'irc', 'yes')
config.set('server', 'cache', 'no')
config.set('server', 'ircname', 'Electrum server')
config.add_section('database')
config.set('database', 'type', 'psycopg2')
config.set('database', 'database', 'abe')
# NOTE(review): the try/except (and presumably config.readfp) around the
# open() below is elided in this excerpt; the print is the failure path.
f = open('/etc/electrum.conf','r')
print "Could not read electrum.conf. I will use the default values."
# Global lock serializing all access to the shared Abe DB connection
# (used by MyStore.safe_sql from multiple client threads).
dblock = thread.allocate_lock()
class MyStore(Datastore_class):
    """Abe DataStore subclass used by the Electrum server.

    Adds an in-memory per-address history cache (``self.tx_cache``) on
    top of Abe's blockchain database, plus the SQL queries the Electrum
    protocol handlers need.

    NOTE(review): this excerpt is elided in several places (loop
    headers, try/except blocks and parts of the SQL column lists are
    not shown); comments only describe what the visible lines do.
    """

    def import_tx(self, tx, is_coinbase):
        # Delegate the actual import to Abe, then invalidate any cache
        # entries touched by the new transaction when caching is on.
        tx_id = super(MyStore, self).import_tx(tx, is_coinbase)
        if config.get('server', 'cache') == 'yes': self.update_tx_cache(tx_id)

    def update_tx_cache(self, txid):
        # Drop the cached history of every address that appears in an
        # input or output of transaction `txid`.
        inrows = self.get_tx_inputs(txid, False)
        # (iteration over `inrows` elided in this excerpt; `row` is one
        # result row, column 6 holding the pubkey hash)
        # NOTE(review): uses the global `store`, not `self` — works only
        # because a single MyStore instance is bound to `store` at startup.
        _hash = store.binout(row[6])
        address = hash_to_address(chr(0), _hash)  # chr(0): mainnet version byte
        if self.tx_cache.has_key(address):  # Python 2 dict idiom
            print "cache: invalidating", address, self.ismempool
            self.tx_cache.pop(address)
        outrows = self.get_tx_outputs(txid, False)
        # (iteration over `outrows` elided in this excerpt)
        _hash = store.binout(row[6])
        address = hash_to_address(chr(0), _hash)
        if self.tx_cache.has_key(address):
            print "cache: invalidating", address, self.ismempool
            self.tx_cache.pop(address)

    def safe_sql(self,sql, params=(), lock=True):
        # Run a SELECT while holding the global DB lock so concurrent
        # client threads do not interleave queries on the shared
        # connection.  (The try/except around the query is elided in
        # this excerpt; the print below is its failure path.)
        # NOTE(review): if selectall() raises, dblock is apparently not
        # released on this path — potential deadlock; confirm against
        # the elided except clause.
        if lock: dblock.acquire()
        ret = self.selectall(sql,params)
        if lock: dblock.release()
        print "sql error", sql

    def get_tx_outputs(self, tx_id, lock=True):
        # Outputs of tx `tx_id`; the LEFT JOINs also pull in the
        # transaction that spends each output (NULL when unspent).
        # NOTE(review): tx_id is interpolated with %d instead of being
        # bound as a query parameter; safe only because it is an int.
        return self.safe_sql("""SELECT
                txout.txout_scriptPubKey,
              LEFT JOIN txin ON (txin.txout_id = txout.txout_id)
              LEFT JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
              LEFT JOIN tx nexttx ON (txin.tx_id = nexttx.tx_id)
             WHERE txout.tx_id = %d
             ORDER BY txout.txout_pos
            """%(tx_id), (), lock)

    def get_tx_inputs(self, tx_id, lock=True):
        # Inputs of tx `tx_id`; COALESCE falls back to `unlinked_txin`
        # for inputs whose previous output is not in the database.
        return self.safe_sql(""" SELECT
             COALESCE(prevtx.tx_hash, u.txout_tx_hash),
             COALESCE(txout.txout_pos, u.txout_pos),
           LEFT JOIN txout ON (txout.txout_id = txin.txout_id)
           LEFT JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
           LEFT JOIN tx prevtx ON (txout.tx_id = prevtx.tx_id)
           LEFT JOIN unlinked_txin u ON (u.txin_id = txin.txin_id)
           WHERE txin.tx_id = %d
           ORDER BY txin.txin_pos
             """%(tx_id,), (), lock)

    def get_address_out_rows(self, dbhash):
        # Confirmed (longest-chain) rows where address `dbhash` is
        # being spent, i.e. appears as the previous output of a txin.
        return self.safe_sql(""" SELECT
              FROM chain_candidate cc
              JOIN block b ON (b.block_id = cc.block_id)
              JOIN block_tx ON (block_tx.block_id = b.block_id)
              JOIN tx ON (tx.tx_id = block_tx.tx_id)
              JOIN txin ON (txin.tx_id = tx.tx_id)
              JOIN txout prevout ON (txin.txout_id = prevout.txout_id)
              JOIN pubkey ON (pubkey.pubkey_id = prevout.pubkey_id)
             WHERE pubkey.pubkey_hash = ?
               AND cc.in_longest = 1""", (dbhash,))

    def get_address_out_rows_memorypool(self, dbhash):
        # Same as get_address_out_rows but for unconfirmed (mempool)
        # transactions: no chain/block joins, no in_longest filter.
        return self.safe_sql(""" SELECT
              JOIN txin ON (txin.tx_id = tx.tx_id)
              JOIN txout prevout ON (txin.txout_id = prevout.txout_id)
              JOIN pubkey ON (pubkey.pubkey_id = prevout.pubkey_id)
             WHERE pubkey.pubkey_hash = ? """, (dbhash,))

    def get_address_in_rows(self, dbhash):
        # Confirmed (longest-chain) rows where address `dbhash` is
        # being paid, i.e. appears in a txout.
        return self.safe_sql(""" SELECT
              FROM chain_candidate cc
              JOIN block b ON (b.block_id = cc.block_id)
              JOIN block_tx ON (block_tx.block_id = b.block_id)
              JOIN tx ON (tx.tx_id = block_tx.tx_id)
              JOIN txout ON (txout.tx_id = tx.tx_id)
              JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
             WHERE pubkey.pubkey_hash = ?
               AND cc.in_longest = 1""", (dbhash,))

    def get_address_in_rows_memorypool(self, dbhash):
        # Mempool counterpart of get_address_in_rows.
        return self.safe_sql( """ SELECT
              JOIN txout ON (txout.tx_id = tx.tx_id)
              JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
             WHERE pubkey.pubkey_hash = ? """, (dbhash,))

    def get_txpoints(self, addr):
        """Return the transaction history ("txpoints") of address `addr`:
        confirmed rows sorted by time, then memory-pool rows.

        The result is cached in self.tx_cache when caching is enabled
        and the address has no pending mempool transactions.
        """
        if config.get('server','cache') == 'yes':
            cached_version = self.tx_cache.get( addr )
            if cached_version is not None:
                return cached_version
        version, binaddr = decode_check_address(addr)
        # (invalid-address handling elided in this excerpt)
        dbhash = self.binin(binaddr)
        # -- confirmed rows ------------------------------------------------
        rows += self.get_address_out_rows( dbhash )
        rows += self.get_address_in_rows( dbhash )
        # (loop over `rows` and its try/except elided in this excerpt)
        nTime, chain_id, height, is_in, blk_hash, tx_hash, tx_id, pos, value = row
        print "cannot unpack row", row  # failure path of the elided try
        tx_hash = self.hashout_hex(tx_hash)
        # fragment of the per-row txpoint dict literal (braces elided):
        #"chain_id": int(chain_id),
        "height": int(height),
        "blk_hash": self.hashout_hex(blk_hash),
        txpoints.append(txpoint)
        known_tx.append(self.hashout_hex(tx_hash))
        # todo: sort them really...
        txpoints = sorted(txpoints, key=operator.itemgetter("nTime"))
        # -- memory-pool rows ----------------------------------------------
        rows += self.get_address_in_rows_memorypool( dbhash )
        rows += self.get_address_out_rows_memorypool( dbhash )
        address_has_mempool = False
        # (loop over mempool `rows` elided in this excerpt)
        is_in, tx_hash, tx_id, pos, value = row
        tx_hash = self.hashout_hex(tx_hash)
        if tx_hash in known_tx:  # already seen as a confirmed tx
        address_has_mempool = True
        #print "mempool", tx_hash
        # mempool txpoints carry a sentinel instead of a block hash:
        "blk_hash": 'mempool', #':%s'%tx_hash,
        txpoints.append(txpoint)
        # -- annotate each txpoint with its input/output addresses ---------
        for txpoint in txpoints:
            tx_id = txpoint['tx_id']
            # (loop over `inrows`/`outrows` rows elided in this excerpt)
            inrows = self.get_tx_inputs(tx_id)
            _hash = self.binout(row[6])
            address = hash_to_address(chr(0), _hash)
            txinputs.append(address)
            txpoint['inputs'] = txinputs
            outrows = self.get_tx_outputs(tx_id)
            _hash = self.binout(row[6])
            address = hash_to_address(chr(0), _hash)
            txoutputs.append(address)
            txpoint['outputs'] = txoutputs
            # for all unspent inputs, I want their scriptpubkey. (actually I could deduce it from the address)
            if not txpoint['is_in']:
                # detect if already redeemed...
                if row[6] == dbhash: break
                #row = self.get_tx_output(tx_id,dbhash)
                # pos, script, value, o_hash, o_id, o_pos, binaddr = row
                # if not redeemed, we add the script
                if not row[4]: txpoint['raw_scriptPubKey'] = row[1]
        # Only cache when no mempool activity: mempool entries change
        # on every new block / broadcast.
        if config.get('server','cache') == 'yes' and not address_has_mempool:
            self.tx_cache[addr] = txpoints
        # (return of txpoints elided in this excerpt)

    def get_status(self, addr):
        # last block for an address.
        # Used by the 'poll' command as a cheap per-address change
        # detector.  (Empty-history handling elided in this excerpt.)
        tx_points = self.get_txpoints(addr)
        return tx_points[-1]['blk_hash']
# NOTE(review): fragment of an elided transaction-broadcast handler (its
# `def` line is not shown in this excerpt).  It pushes a raw transaction
# to the local bitcoind; the string assignment is the rejection path.
conn = bitcoinrpc.connect_to_local()
v = conn.importtransaction(tx)
v = "error: transaction rejected by memorypool"
def listen_thread(store):
    """Accept TCP clients on the configured host:port and hand each
    connection to its own client_thread.

    NOTE(review): the s.listen() call and the accept loop header are
    elided in this excerpt.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts without waiting out TIME_WAIT.
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((config.get('server','host'), config.getint('server','port')))
    conn, addr = s.accept()
    thread.start_new_thread(client_thread, (addr, conn,))
def random_string(N):
    """Return a random string of N characters drawn from [A-Z0-9].

    Used for session ids (which act as password-like capability tokens
    for the 'poll'/'update_session' commands) and for the IRC nick, so
    draw from random.SystemRandom (os.urandom-backed) instead of the
    default Mersenne Twister, whose output is predictable.
    """
    import random, string
    rng = random.SystemRandom()
    return ''.join(rng.choice(string.ascii_uppercase + string.digits) for x in range(N))
def client_thread(ipaddr,conn):
    """Serve one client connection.

    Requests are "(cmd, data)#"-terminated tuples parsed with
    ast.literal_eval; replies are repr()-ed Python values.

    NOTE(review): the receive loop, the top of the if/elif command
    dispatch and several branch bodies are elided in this excerpt;
    comments mark which command each visible group belongs to.
    """
    #print "client thread", ipaddr
    cmd, data = ast.literal_eval(msg[:-1])  # msg ends with the '#' terminator
    print "syntax error", repr(msg)  # failure path of the elided try
    # 'b' command: current block number.
    out = "%d"%block_number
    elif cmd in ['session','new_session']:
        # Register a new session; `data` is either a bare address list
        # (old 'session' form) or a (version, addresses) pair.
        session_id = random_string(10)
        addresses = ast.literal_eval(data)
        version, addresses = ast.literal_eval(data)
        print time.asctime(), "new session", version, ipaddr, session_id, addresses[0] if addresses else addresses, len(addresses)
        sessions[session_id] = { 'addresses':{}, 'version':version }
        # (loop over addresses elided) — each starts with empty status.
        sessions[session_id]['addresses'][a] = ''
        out = repr( (session_id, config.get('server','banner').replace('\\n','\n') ) )
        sessions[session_id]['last_time'] = time.time()
    elif cmd=='update_session':
        # Replace the watched-address set of an existing session.
        session_id, addresses = ast.literal_eval(data)
        print time.asctime(), "update session", ipaddr, session_id, addresses[0] if addresses else addresses, len(addresses)
        sessions[session_id]['addresses'] = {}
        # (loop over addresses elided)
        sessions[session_id]['addresses'][a] = ''
        sessions[session_id]['last_time'] = time.time()
    # 'poll' branch: report addresses whose status changed since last poll.
        session = sessions.get(session_id)
        print time.asctime(), "session not found", session_id, ipaddr
        out = repr( (-1, {}))
        addresses = session['addresses']
        session['last_time'] = time.time()
        for addr in addresses:
            if store.tx_cache.get( addr ) is not None: k += 1  # cache-hit count
            status = store.get_status( addr )
            last_status = addresses.get( addr )
            if last_status != status:
                addresses[addr] = status
        sessions[session_id]['addresses'] = addresses
        out = repr( (block_number, ret ) )
        t2 = time.time() - t1
        # Slow polls are logged with the cache hit ratio for diagnosis.
        print "high load:", session_id, "%d/%d"%(k,len(addresses)), t2
    # 'h' branch: full history for one address.
        out = repr( store.get_txpoints( address ) )
    # 'load' branch: password-protected session count.
        if config.get('server','password') == data:
            out = repr( len(sessions) )
            out = 'wrong password'
    # 'tx' branch: broadcast result logged.
        print "sent tx:", out
    elif cmd =='clear_cache':
        # Password-protected cache flush.
        if config.get('server','password') == data:
            out = 'wrong password'
    elif cmd =='get_cache':
        # Password-protected dump of one address's cache entry.
        if config.get('server','password') == pw:
            out = store.tx_cache.get(addr)
            out = 'wrong password'
        out = "error: "+ repr(data)  # unknown command fallback
    # 'stop' branch: password-protected shutdown.
        if config.get('server','password') == data:
            out = 'wrong password'
    # 'peers' branch: IRC-discovered server list.
        out = repr(peer_list.values())
    #print ipaddr, cmd, len(out)
    print "error, could not send"  # failure path of the elided send try
# Shared deserialization stream reused by memorypool_update() to parse
# raw transactions fetched from bitcoind.
ds = BCDataStream.BCDataStream()
def memorypool_update(store):
    """Fetch bitcoind's memory pool and import any transactions not yet
    in the Abe database, so unconfirmed history is queryable.

    NOTE(review): the try/except around the RPC call and the loop over
    the returned transactions are elided in this excerpt.
    """
    conn = bitcoinrpc.connect_to_local()
    v = conn.getmemorypool()
    print "cannot contact bitcoin daemon"  # failure path of the elided try
    v = v['transactions']
    # (per-transaction loop elided; `hextx` is one hex-encoded raw tx)
    ds.write(hextx.decode('hex'))
    tx = deserialize.parse_Transaction(ds)
    tx['hash'] = util.double_sha256(tx['tx'])
    # Skip transactions the store already knows about.
    if store.tx_find_id_and_value(tx):
    #print "new tx", tx['hash'][::-1].encode('hex')
    store.import_tx(tx, False)  # False: mempool txs are never coinbase
def clean_session_thread():
    """Periodically expire idle client sessions.

    NOTE(review): the sleep loop and the idle-time test on
    s['last_time'] are elided in this excerpt.
    """
    for k,s in sessions.items():
        print time.asctime(), "lost session",k
# NOTE(review): body of the IRC peer-discovery thread; its
# `def irc_thread():` line and the surrounding connect/read loops are
# elided in this excerpt.  Servers announce themselves on Freenode's
# #electrum channel with an 'E_'-prefixed nick and discover each other
# via NAMES/WHO replies.
NICK = 'E_'+random_string(10)
s.connect(('irc.freenode.net', 6667))
s.send('USER electrum 0 * :'+config.get('server','host')+' '+config.get('server','ircname')+'\n')
s.send('NICK '+NICK+'\n')
s.send('JOIN #electrum\n')
sf = s.makefile('r', 0)  # unbuffered reader over the socket
line = line.rstrip('\r\n')
# Keep-alive: reply to server PING (line appears to be split into
# tokens at this point — the split is elided in this excerpt).
s.send('PONG '+line[1]+'\n')
elif '353' in line: # answer to /names
    k = line.index('353')
    for item in line[k+1:]:
        if item[0:2] == 'E_':  # another Electrum server: ask who it is
            s.send('WHO %s\n'%item)
elif '352' in line: # answer to /who
    # warning: this is a horrible hack which apparently works
    k = line.index('352')
    ip = socket.gethostbyname(ip)
    peer_list[name] = (ip,host)  # exposed to clients via 'peers'
# Refresh the peer list every 5 minutes.
if time.time() - t > 5*60:
    s.send('NAMES #electrum\n')
traceback.print_exc(file=sys.stdout)
if __name__ == '__main__':
    # --- one-shot client mode (command dispatch partly elided): build
    # a "(cmd, data)#" request string and send it to a running server.
    request = "('load','%s')#"%config.get('server','password')
    request = "('peers','')#"
    request = "('stop','%s')#"%config.get('server','password')
    elif cmd == 'clear_cache':
        request = "('clear_cache','%s')#"%config.get('server','password')
    elif cmd == 'get_cache':
        request = "('get_cache',('%s','%s'))#"%(config.get('server','password'),sys.argv[2])
    request = "('h','%s')#"%sys.argv[2]
    request = "('b','')#"
    s = socket.socket( socket.AF_INET, socket.SOCK_STREAM)
    s.connect((config.get('server','host'), config.getint('server','port')))
    # --- server mode: open the Abe store and start worker threads ----
    print "starting Electrum server"
    print "cache:", config.get('server', 'cache')
    conf = DataStore.CONFIG_DEFAULTS
    args, argv = readconf.parse_argv( [], conf)
    # Map our config file onto Abe's per-backend connection arguments.
    args.dbtype= config.get('database','type')
    if args.dbtype == 'sqlite3':
        args.connect_args = { 'database' : config.get('database','database') }
    elif args.dbtype == 'MySQLdb':
        args.connect_args = { 'db' : config.get('database','database'), 'user' : config.get('database','username'), 'passwd' : config.get('database','password') }
    elif args.dbtype == 'psycopg2':
        args.connect_args = { 'database' : config.get('database','database') }
    store = MyStore(args)
    store.ismempool = False
    thread.start_new_thread(listen_thread, (store,))
    thread.start_new_thread(clean_session_thread, ())
    if (config.get('server','irc') == 'yes' ):
        thread.start_new_thread(irc_thread, ())
    # Main loop (header elided): refresh the mempool import and the
    # current block number; ismempool flags cache-invalidation logging.
    store.ismempool = True
    memorypool_update(store)
    store.ismempool = False
    block_number = store.get_block_number(1)
    traceback.print_exc(file=sys.stdout)
    print "server stopped"