#!/usr/bin/python
# coding=utf-8
import requests,re,json,time,os
import heapq
import sys
sys.path.append(r"/home/python_workspace/project_scrapy/eastmoney/")
#sys.path.append(r"E:/workspace/project_scrapy/eastmoney/")
import dbs
import log

from bs4 import BeautifulSoup

logging = log.Logger('logs/eastmoney.log',level='info').logger

class GPINFO(object):
    """Eastmoney stock scraper.

    Fetches the stock listing page, visits each qualifying stock's detail
    page, extracts the financial indicator row, and inserts one row per
    stock per day into the t_stock_detail table.  Per-stock records are
    also appended to a per-day cache file so a rerun on the same date can
    load from disk instead of re-crawling.
    """

    # Shared DB connection (project-local wrapper; created once at
    # class-definition time and reused by every instance).
    database = dbs.MySQL()

    def __init__(self):
        # Listing page containing an anchor for every stock code.
        self.Url = 'http://quote.eastmoney.com/stocklist.html'
        self.BaseData = []                    # list of per-stock dicts
        self.Date = time.strftime('%Y%m%d')   # YYYYMMDD, used for cache name and DB rows
        self.Record = 'basedata' + self.Date  # per-day cache file
        if os.path.exists(self.Record):
            logging.info('record exist...')
            self.BaseData = self.get_base_data_from_record()
        else:
            logging.info('fuck-get data again...')
            self.get_data()

    # Append one JSON line to the per-day cache file.
    def write_record(self, text):
        """Append *text* plus a newline to the cache file, UTF-8 encoded."""
        with open(self.Record, 'ab') as f:
            f.write((text + '\n').encode('utf-8'))

    # Load previously cached data from the record file.
    def get_base_data_from_record(self):
        """Return the cached per-stock dicts (one JSON object per line)."""
        records = []
        with open(self.Record, 'rb') as f:
            for line in f.readlines():
                records.append(json.loads(line.decode('utf-8')))
        return records

    @staticmethod
    def _to_yi(value):
        """Normalize a money string with a Chinese unit suffix to 亿 (100M).

        '12.3亿' -> '12.3' (stays a string, as in the original code);
        '5千万'  -> 0.5 (float); '8百万' -> 0.08 (float); anything else
        is returned unchanged.
        """
        if '亿' in value:
            return value.strip('亿')
        if '千万' in value:
            return float(value.strip('千万')) / 10   # 千万 -> 亿
        if '百万' in value:
            return float(value.strip('百万')) / 100  # 百万 -> 亿
        return value

    # Crawl the listing page and every qualifying detail page.
    def get_data(self):
        """Scrape all 00*/60* stocks, cache them, and insert new DB rows."""
        response = requests.get(self.Url)
        orihtml = response.content
        # Eastmoney pages are GBK-family encoded; let bs4 decode them.
        soup = BeautifulSoup(orihtml, 'html.parser', from_encoding="gb18030")

        count = 0
        for a in soup.find('div', class_='quotebody').find_all('a', {'target': '_blank'}):
            record_d = {}

            if a.get_text() == "":
                continue

            # Anchor text looks like "name(code)".
            num = a.get_text().split('(')[1].strip(')')
            # Keep only main-board codes starting with 00 or 60.
            if not (num.startswith('00') or num.startswith('60')):
                continue
            record_d['num'] = num
            name = a.get_text().split('(')[0]
            record_d['name'] = name
            detail_url = a['href']
            record_d['detail_url'] = detail_url

            # Fetch the stock detail page; skip on network failure.
            try:
                cwzbhtml = requests.get(detail_url, timeout=30).content
            except Exception as e:
                # FIX: original passed e as an unused %-arg ('msg', e), which
                # makes logging raise a formatting error; use a placeholder.
                logging.info('perhaps timeout: %s', e)
                continue
            cwzbsoup = BeautifulSoup(cwzbhtml, 'html.parser', from_encoding="gb18030")

            # Financial indicator row layout:
            # [name, market cap, net assets, net profit, PE, PB,
            #  gross margin, net margin, ROE]
            try:
                cwzb_list = cwzbsoup.find('div', class_='cwzb').tbody.tr.get_text().split()
            except Exception as e:
                logging.info('error: %s', e)
                continue

            # Skip delisted stocks (indicators shown as '-').
            if '-' in cwzb_list:
                continue

            record_d['data'] = cwzb_list
            self.BaseData.append(record_d)
            # FIX: this call was commented out, so the cache file was never
            # written and the cache branch in __init__ could never trigger.
            self.write_record(json.dumps(record_d))

            # Normalize money columns to units of 亿.
            total_market_value = self._to_yi(cwzb_list[1])
            net_assets = self._to_yi(cwzb_list[2])
            net_profit = self._to_yi(cwzb_list[3])

            pe = cwzb_list[4]
            pb = cwzb_list[5]
            reo = cwzb_list[8].strip('%')  # ROE without the percent sign

            logging.info(record_d)
            # Reference ratio PB*PE/ROE; guard against division by zero.
            if reo != '0.00':
                per_cankao = round(float(pb) * float(pe) / float(reo), 5)
            else:
                per_cankao = 0

            # Check whether this stock was already recorded today
            # (use self.Date for consistency with the cache file name).
            result = self.database.query_dic({
                'select': 'num',
                'from': 't_stock_detail',
                'where': {
                    'num': num,
                    'statistics_date': self.Date,
                }
            })
            # Insert only when no row exists for today.
            if not result:
                self.database.query_dic({
                    'insert': 't_stock_detail',
                    'domain_array': [
                        'num', 'name', 'target_url', 'total_market_value',
                        'net_assets', 'net_profit', 'pe', 'pb', 'gross_rate',
                        'net_rate', 'reo', 'per_cankao', 'statistics_date',
                        'create_date', 'update_date'
                    ],
                    'value_array': [
                        num, name, detail_url, str(total_market_value),
                        str(net_assets), str(net_profit), pe, pb,
                        cwzb_list[6].strip('%'), cwzb_list[7].strip('%'),
                        reo, str(per_cankao),
                        self.Date, str(time.time()), str(time.time())
                    ]
                })
            count = count + 1

def main():
    """Run the scraper and log the 10 stocks with the highest net margin.

    data layout: [name, market cap, net assets, net profit, PE, PB,
    gross margin, net margin, ROE] — index 7 is the net margin used as
    the ranking key.
    """
    scraper = GPINFO()
    result = scraper.BaseData
    top_10 = heapq.nlargest(10, result,
                            key=lambda r: float(r['data'][7].strip('%')))
    for item in top_10:
        for field in item['data']:
            # FIX: dropped the Python-2 ``print x,`` leftover trailing comma,
            # which only built a useless one-element tuple in Python 3.
            logging.info(field)
        logging.info('\n')
# Notes on logging non-ASCII (Chinese) data in Python 3:
#   - plain strings log directly; no .encode('utf8') is needed
#   - for lists, log each item in a loop
#   - for dicts, either loop over items or serialize with
#     json.dumps(d, ensure_ascii=False) so Chinese characters stay readable
#     (the Python-2-era encoding= keyword of json.dumps no longer exists)


# Script entry point: scrape (or load the day's cache) and log the top 10.
if __name__ == '__main__':
    main()

