'''
Created on 2011-10-26
test for rawcontent ,errlist,jobslist use dict instead of file
@author: Administrator
'''
import os
import urllib2
import sqlite3
import datetime
import threading
import Queue
import time
import shutil 
import simplejson
from setting import *
# Shared state for the download pipeline, used by ThreadStockDataDownload
# and stock2011:
#   joblist: job code -> '' while pending, raw downloaded content when done
#   loglist: job code -> 'except' (urlopen raised) or 'Error' (vendor error)
# NOTE(review): the {1:1} seed looks like leftover debug data; both dicts
# are cleared in stock2011.downloadstart before real use.
joblist={1:1}
loglist={1:1}
class StockList():
    '''
    Fetch A-share (SH/SZ), HK and US stock lists from various vendors
    (SINA, bloomberg, imeigu).  The SINA/imeigu methods return a dict of
    {code: name} ({code: code} where the vendor gives no name); the
    bloomberg scrapers return a list of per-row cell lists.  Every fetch
    goes through urllib2 over the network.
    '''
    def getstocklist(self,reg):
        # Dispatch on the region prefix used throughout this file:
        # SH/SZ = Shanghai/Shenzhen A shares, HK = Hong Kong, NS = US.
        # Prints and falls through (returns None) on an unknown region.
        if reg=='SH':return self.getSHstocklistfromSINA()
        elif reg=='HK':return self.getHKstocklistfromSINA()
        elif reg=='NS':return self.getUSstocklistfromSINA()
        elif reg=='SZ':return self.getSZstocklistfromSINA()
        else:print 'error reg'
    def getstocklist2(self,reg):
        # Alternate dispatcher on vendor-neutral region codes.
        # NOTE(review): getCNstocklist/getHKstocklist/getUSstocklist are not
        # defined anywhere in this class, so these branches raise
        # AttributeError when hit; kept unchanged for interface parity.
        if reg=='CH':return self.getCNstocklist()
        elif reg=='HK':return self.getHKstocklist()
        elif reg=='US':return self.getUSstocklist()
        else:print 'error reg'
    def getusstocklistfromimeigu(self):
        '''Scrape the US stock table from hq.imeigu.com (page 1 only; the
        pagination loop was never finished in the original and stays off).

        Returns {symbol: name} parsed from the page.

        BUGFIX: the original updated the dict outside the per-row loop, so
        only the last <tr> was ever recorded, and then discarded everything
        by returning []; rows are now recorded inside the loop and the
        dict is returned to the caller.
        '''
        def getonepage(page,stocklist):
            url='http://hq.imeigu.com/list.jsp?od=0&ac=0&&pagex='+str(page)
            content=urllib2.urlopen(url).read().decode('utf8')
            # Grab the first <table> on the page; each <tr> is one stock.
            s=content.find('<table cellspacing="0">')
            p=content.find('</table',s)
            nlist=content[s:p].split('<tr')
            for n in nlist:
                cells=[]
                for t in n.split('<td'):
                    # Innermost text of the cell: between the last '>' and
                    # the closing </a> (if present) or </td>.
                    p2=t.find('</td')
                    k=t.rfind('</a',0,p2)
                    if k>0:p2=k
                    s2=t.rfind('>',0,p2)
                    cells.append(t[s2:p2].strip(' >\t\n'))
                # presumably cells[1] is the symbol and cells[2] the name
                # -- TODO confirm against a live page.
                if len(cells)>4 and cells[1]:
                    stocklist[cells[1]]=cells[2]
            return len(nlist)
        stocklist={}
        ret=getonepage(1,stocklist)
        print ret
        return stocklist
    def _parsebloombergrow(self,src):
        '''Extract the non-empty cell texts from one bloomberg <tr> chunk.

        Shared by the bloomberg scrapers below (the original duplicated
        this parser verbatim in both methods).
        '''
        ret=[]
        for n in src.split('<td'):
            p=n.find('</td')
            k=n.rfind('</span',0,p)
            if k>0:p=k
            s=n.rfind('>',0,p)
            v=n[s:p].strip('>\n\t ')
            if len(v)>0: ret.append(v)
        return ret
    def _getbloombergpage(self,url):
        '''Download one bloomberg company-list page and parse its
        ticker_data table into a list of row-cell lists.'''
        content=urllib2.urlopen(url).read()
        s=content.find('<table class="ticker_data">')
        p=content.find('</table',s)
        stocklist=[]
        for n in content[s:p].split('<tr'):
            ret=self._parsebloombergrow(n)
            if len(ret)>0:stocklist.append(ret)
        return stocklist
    def getHKstocklistfrombloomberg(self):
        '''Walk every bloomberg company-list page until an empty page,
        returning all parsed rows.  Returns 0 when page 1 is already empty
        (original behavior preserved).

        NOTE(review): despite the method name, the URL points at the USA
        country list (the hong-kong/china URLs were the commented-out
        alternatives in the original); kept as-is to preserve behavior.
        '''
        urlpre='http://www.bloomberg.com/markets/companies/country/usa/'
        stocklist=[]
        page=1
        tlist=self._getbloombergpage(urlpre+str(page)+'/')
        print "page:",page," tlist len:",len(tlist)
        if len(tlist)==0:return 0
        stocklist.extend(tlist)
        while len(tlist)>0:
            page=page+1
            tlist=self._getbloombergpage(urlpre+str(page)+'/')
            print "page:",page," tlist len:",len(tlist)
            stocklist.extend(tlist)
        return stocklist
    def getAstocklistfrombloomberg(self):
        '''Fetch page 1 (only) of bloomberg's China company list.
        Returns the parsed rows, or 0 when the page is empty.'''
        page=1
        tlist=self._getbloombergpage('http://www.bloomberg.com/markets/companies/country/china/'+str(page)+'/')
        print "page:",page," tlist len:",len(tlist)
        if len(tlist)==0:return 0
        return list(tlist)
    def getHKstocklistfromSINA(self):
        '''Return {symbol: name} for all HK stocks from SINA's feed.'''
        url='http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHKStockData?page=1&num=8000&sort=symbol&asc=1&node=qbgg_hk&_s_r_a=page'
        # The feed is almost-JSON; split it by hand into per-stock records.
        nlist=urllib2.urlopen(url).read().decode('gb2312').strip('[]{}').split('},{')
        stocklist={}
        for n in nlist:
            rlist={}
            for t in n.split(','):
                plist=t.split(':')
                rlist[plist[0]]=plist[1].strip('"')
            stocklist[rlist['symbol']]=rlist['name']
        return stocklist
    def getUSstocklistfromSINA(self):
        '''Return {code: code} for US stocks across SINA's sector nodes
        (no name is extracted, matching the original behavior).'''
        nodelist=['china_us','yysp_us','meida_us','auto_us','sales_us','finance_us','tech_us']
        urlpre='http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getUSList?page=1&num=8000&sort=chg&asc=0&_s_r_a=init&node='
        stocklist={}
        for node in nodelist:
            nlist=urllib2.urlopen(urlpre+node).read().strip('{}[]').split('},{')
            for n in nlist:
                # The code is the second ':'-separated field of the record.
                code=n.split(':')[1].strip('"')
                stocklist[code]=code
        return stocklist
    def _getCNstocklistfromSINA(self,node):
        '''Shared A-share fetch for getSHstocklistfromSINA /
        getSZstocklistfromSINA (the original duplicated this body).
        node is SINA's market node, 'sh_a' or 'sz_a'.
        Returns {code: name}.'''
        url='http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData?page=1&num=20000&sort=symbol&asc=1&node='+node+'&_s_r_a=init'
        content=urllib2.urlopen(url).read().decode('gb2312')
        stocklist={}
        for c in content.strip(' []{}').split('},{'):
            fsone={}
            for f in c.split(','):
                tlist=f.split(':')
                fsone[tlist[0]]=tlist[1].strip('"')
            stocklist[fsone['code']]=fsone['name']
        return stocklist
    def getSHstocklistfromSINA(self):
        '''Return {code: name} for Shanghai A shares from SINA.'''
        return self._getCNstocklistfromSINA('sh_a')
    def getSZstocklistfromSINA(self):
        '''Return {code: name} for Shenzhen A shares from SINA.'''
        return self._getCNstocklistfromSINA('sz_a')
class ThreadStockDataDownload(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.queue=urlqueue
    def run(self):
        job=self.queue.get()  
        reg=job[:2]
        if reg=='SZ' or reg=='SH':reg='CH'
        if reg=='NS':reg='US'
        stockcode=job[2:]+':'+reg
        if reg=='HK':stockcode=stockcode.strip('0')
#        print 'thread stockcode:',stockcode
        url='http://www.bloomberg.com/apps/data?pid=webpxta&Securities='+stockcode+'&TimePeriod=5Y&Outfields=HDATE,PR006-H,PR007-H,PR008-H,PR005-H,PR013-H'
        
        stockcode2=job.replace(':','_')
        try:
            content=urllib2.urlopen(url).read()
        except:
            print '[x]',#url   
            loglist[job]='except'
            self.queue.task_done()  
            return 
        if content[:5]=='Error':
            print '[r]',#url
            loglist[job]='Error'
            self.queue.task_done()
        else:
            clist=content.split('\n')
            tlist=''
            for t in range(1,len(clist)-2):
                tlist=tlist+clist[t]+'\n'
            nlist=job.split(':')
            fname=os.path.join(rawcontentpath,job)
            f=open(fname,'w')
            f.write(tlist)
#            f.write(tlist.replace('"',' '))
            f.close()  
            joblist[job]=content
            self.queue.task_done()
class ThreadMain(threading.Thread):
    '''Fan-out driver: spawn one download worker per queued job, then block
    until the shared url queue reports every job done.'''
    def __init__(self,num):
        threading.Thread.__init__(self)
        # Number of worker threads to launch (== jobs queued this round).
        self.num=num
    def run(self):
        workers=[ThreadStockDataDownload() for _ in range(self.num)]
        for worker in workers:
            worker.start()
        # Block until each queued job has been task_done()'d by a worker.
        urlqueue.join()
 
 
class stock2011():
    def __init__(self):
        if not os.path.exists(rawcontentpath):os.mkdir(rawcontentpath)
    def clearpathfile(self,mypath):
        for root,dirs,files in os.walk(mypath):
            for f in files:
                os.remove(os.path.join(root,f))  
    def geterrorjobslist(self):
        errorlist=self.getpathfilelist(errordatapath)
        return errorlist
    def getpathfilelist(self,mypath,filter=''):
        flist=[]
        for root,dirs,files in os.walk(mypath):
            for f in files:
                if filter=='':flist.append(f.replace('_',':'))
                else:
                    if f.find(filter)>0:flist.append(f.replace('_',':'))
        return flist     
   
    def downloadstart(self,stockall):
        self.clearpathfile(rawcontentpath)
        joblist.clear()
        loglist.clear()
        
        jobs=StockList()
        ######################################
        for reg in stockall.keys():
            print len(stockall[reg]),
        for reg in stockall.keys():
            for stock in stockall[reg].keys():                
                joblist[reg+stock]=''
#        print 'joblist1:',len(joblist),joblist.keys()
##################################################
        todolist=[]
        todolist=self.getsomejobs()
        g_jobsnumber=len(todolist) 
#        print 'this round todo:',len(todolist)
        while g_jobsnumber>0:
            print 'this round todo:',g_jobsnumber,datetime.datetime.now()
            for job in todolist:
                urlqueue.put(job,block=True ,timeout= threadtimeout)             
            td=ThreadMain(g_jobsnumber)
            td.start()
            td.join(10)
            todolist=self.getsomejobs()
            g_jobsnumber=len(todolist)  
#        jsonfile=os.path.join(foxdata_rootpath,'rawcontent.json')
#        simplejson.dump(joblist,open(jsonfile,'w'))
    def getsomejobs(self):
        todolist=[]
#        print 'joblist:',len(joblist)
        for job in joblist:
            if joblist[job]=='':todolist.append(job)
        print 'left jobs:',len(todolist)
        errorlist=[]
        for log in loglist:
            if loglist[log]=='error':errorlist.append(log)
        
#        loglist={}
#        print 'todolist:',todolist
#        print errorlist
#        print todolist==errorlist
        if todolist==errorlist:return []
        else:            
            jobnum=len(todolist)
#            print 'go here',jobnum,workers_number,jobnum<=workers_number
            if jobnum<=workers_number:
                return todolist
            else:
#                print todolist[:workers_number]
                return todolist[:workers_number]

    def checkstockfile(self):
        rawlist=[]
        for root,dirs,files in os.walk(rawcontentpath):
            for f in files:
                rawlist.append(f)
                fname=os.path.join(root,f)
                f=open(fname,'r')
                content=f.read()
                f.close()
                tlist=content.split('\n')
                s=tlist[0].find('Count')
                p=tlist[0].find('=',s)
                q=tlist[0].find('"',p)
                num=int(tlist[0][p+1:q])
                flines=len(tlist)
                if flines==num+3:pass
                else:print 'checkstockfile failed:',len(tlist),num,fname
                
        print 'raw file num:',len(rawlist),rawlist[0]
        dblist=[]
        for reg in reglist:
            rlist=self.getpathfilelist(os.path.join(stockbycodepath,reg))
            for r in rlist:
                p=r.find('.')
                dblist.append(r[:p]+'_'+reg)
        print 'db file num:',len(dblist),dblist[0]
        
        for raw in rawlist:
            if raw in dblist:pass
            else:print raw,'not in dblist'
        return


if __name__ == '__main__':
    starttime=datetime.datetime.now()
    print 'this is packStockTakeData  tasklist',starttime   
    s=StockList()
    slist=s.getSZstocklistfromSINA()
    print slist
    print 'stocknum:',len(slist)
    sj=stock2011()    
    sj.downloadstart({'SZ':slist})
    print 'download OK',starttime,(datetime.datetime.now()-starttime).seconds
#    jsonfile=os.path.join(foxdata_rootpath,'rawcontent.json')
#    slist=simplejson.load(open(jsonfile,'r'))
#    print len(slist)
#    for s in slist.keys():
#        if len(slist[s])<10:print s,
#    sj.dostockdb()
#    print 'dostockdb OK',starttime,datetime.datetime.now()
#    sj.checkstockfile()
#    print 'checkstockfile OK',starttime,datetime.datetime.now()
#    sj.trans2bydate()
#    print 'start and end time:',starttime,datetime.datetime.now()
#    clearallfile()