"""
数据抓取，完成策略需要的日K数据（不复权和后复权）以及财报的抓取，
并把抓取的结果存储到MongoDB中，数据库名是quant
包含了以下几个方法：
crawlStockList: 抓取沪深所有股票的代码和名称，并保存到stock中
crawlSinglePage: 一个工具方法，抓取单页的股票列表数据
getAllCodes: 从数据库中获取所有的股票列表，如果找不到数据，则抓取
crawlKData: 抓取股票的日行情数据，支持抓取未复权和后复权两种类型的数据
crawlZSKdata: 抓取上证综指和沪深300的日K数据，指数没有复权的情况，所以只保存到daily_k中
crawlFinanceReport: 抓取财报数据，主要关注EPS、公告日期、报告期
"""

import urllib3
import traceback
import json
import sys
from datetime import datetime
from pymongo import MongoClient, UpdateOne

userAgent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'

db = MongoClient('mongodb://localhost:27017')['quant']

def crawlSinglePage(page):
    """
    Crawl a single page of the stock list from the eastmoney API.

    :param page: 1-based page number to fetch
    :return: the raw response body decoded as UTF-8, or None on any failure
    """
    url = 'http://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?' \
          'cb=&type=CT&token=4f1862fc3b5e77c150a2b985b12db0fd&sty=FCOIATC&' \
          'js={%22data%22:[(x)],%22recordsFiltered%22:(tot)}&cmd=C._A&st=(ChangePercent)&sr=-1&p={0}&ps=200&_=1534816198410'

    try:
        connPool = urllib3.PoolManager()
        response = connPool.request('GET', url.replace('{0}', str(page)), headers={'User-Agent': userAgent})
        return response.data.decode('UTF-8')
    except Exception:
        # Best-effort fetch: log the stack trace and let the caller
        # interpret None as "this page could not be retrieved".
        traceback.print_exc()
        return None

def crawlStockList():
    """
    Crawl the code and name of every listed A-share stock and upsert
    them into the `stock` collection in MongoDB.
    """
    updateRequests = []

    # Crawl page by page until every record the server reports has been seen.
    crawledTotal = 0
    page = 1

    while True:
        # Fetch the current page; None means the request failed.
        response = crawlSinglePage(page)
        print(response)

        if response is None:
            break

        data = json.loads(response)

        # Each entry is a comma-separated record; the stock code and
        # name are the second and third fields.
        stocks = data['data']
        print(stocks)

        # Guard: an empty page would otherwise loop forever because
        # crawledTotal could never reach the reported total.
        if not stocks:
            break

        for stock in stocks:
            fields = stock.split(",")
            print("code:{0}, name:{1}".format(fields[1], fields[2]))
            # Document shape stored in MongoDB.
            doc = {
                'code': fields[1],
                'name': fields[2],
                'update_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            }

            # Upsert keyed on the stock code so re-runs refresh in place.
            updateRequests.append(
                UpdateOne(
                    {'code': fields[1]},
                    {'$set': doc},
                    upsert=True
                )
            )

            crawledTotal += 1

        # Stop once we've collected as many records as the server reports.
        total = data['recordsFiltered']
        print("Crawled {0}/{1}".format(crawledTotal, total))
        if crawledTotal == total:
            break
        page += 1

    # Flush all pending upserts in one unordered bulk write.
    if len(updateRequests) > 0:
        updateResult = db.stock.bulk_write(updateRequests, ordered=False)
        print('Update stock collection, modified:{0}, upserted:{1}'.format(updateResult.modified_count, updateResult.upserted_count))

def getAllCodes():
    """
    Return the list of all stock codes stored in the `stock` collection.

    If the collection is empty, trigger `crawlStockList` and re-query,
    giving up after ten attempts.
    """
    def _fetch():
        # Project only the code field; drop the Mongo _id.
        cursor = db.stock.find(projection={'code': True, '_id': False})
        return [doc['code'] for doc in cursor]

    codes = _fetch()
    attempts = 0

    while not codes and attempts < 10:
        crawlStockList()
        codes = _fetch()
        attempts += 1

    return codes

def crawlKData(authorityType):
    """
    Crawl daily K-line data for every stock and upsert it into MongoDB.

    :param authorityType: '' for raw (unadjusted) prices, 'ba' for
        backward-adjusted prices; also selects the target collection
        (`daily_k` vs `daily_k_ba`).
    """
    # Fetch the full list of stock codes first.
    codes = getAllCodes()

    connPool = urllib3.PoolManager()
    url = 'http://pdfm.eastmoney.com/EM_UBG_PDTI_Fast/api/js?token=4f1862fc3b5e77c150a2b985b12db0fd' \
          '&rtntype=1&id={0}&type=k&authorityType={1}&cb='

    for code in codes:
        print(code)

        # The API expects a market suffix appended to the code:
        # '1' for Shanghai ('60...'), '2' for Shenzhen ('00...'/'30...').
        if code[0:2] == '60':
            stockKUrl = url.replace('{0}', code + '1')
        elif code[0:2] in ['30', '00']:
            stockKUrl = url.replace('{0}', code + '2')
        else:
            # BUGFIX: the original left the literal '{0}' placeholder in
            # the URL for unknown prefixes, issuing a bogus request. Skip.
            continue

        stockKUrl = stockKUrl.replace('{1}', authorityType)

        try:
            response = connPool.request('GET', stockKUrl, headers={'User-Agent': userAgent})
        except Exception:
            # One failed request must not abort the whole crawl.
            traceback.print_exc()
            continue

        # Skip the stock when no response could be obtained.
        if response is None:
            continue

        # Strip the JSONP wrapper parentheses; each line is one day's
        # comma-separated quote record.
        hqStr = response.data.decode('UTF-8')[1:-1]
        hqLines = hqStr.split('\n')

        updateRequests = []
        for line in hqLines:
            if len(line) == 0:
                continue

            fields = line.split(',')

            # Volume is reported in lots of 100 shares; convert to shares.
            volume = int(fields[5]) * 100
            if volume > 0:
                # Positive volume means the stock traded that day.
                tradeStatus = '交易'
                # Turnover may carry a Chinese magnitude suffix
                # (亿 = 1e8, 万 = 1e4); normalize to yuan.
                if fields[6][-1] in ['亿', '万']:
                    amount = float(fields[6][0:-1])
                    if fields[6][-1] == '亿':
                        amount *= 1e8
                    else:
                        amount *= 1e4
                else:
                    amount = float(fields[6])
            else:
                # Zero volume: the stock was suspended that day.
                amount = 0
                tradeStatus = '停牌'

            # Document shape stored in MongoDB.
            doc = {
                'code': code,               # stock code
                'time': fields[0],          # trade date
                'open': float(fields[1]),   # opening price
                'close': float(fields[2]),  # closing price
                'high': float(fields[3]),   # daily high
                'low': float(fields[4]),    # daily low
                'volume': volume,           # volume in shares
                'amount': amount,           # turnover in yuan
                'trade_status': tradeStatus
            }

            # Upsert keyed on (code, date) so re-runs refresh in place.
            updateRequests.append(
                UpdateOne(
                    {'code': code, 'time': fields[0]},
                    {'$set': doc},
                    upsert=True
                )
            )

        # Unadjusted data goes to daily_k, backward-adjusted to daily_k_ba.
        collectionName = 'daily_k_ba' if authorityType == 'ba' else 'daily_k'

        if len(updateRequests) > 0:
            updateResult = db[collectionName].bulk_write(updateRequests, ordered=False)
            print('code:{0}, modified:{1}, upserted:{2}'.format(code, updateResult.modified_count, updateResult.upserted_count))

def crawlZSKdata():
    """
    Crawl daily K-line data for the SSE Composite (000001) and CSI 300
    (000300) indices and upsert it into `daily_k`.

    Indices have no price adjustment, so only the `daily_k` collection
    is populated.
    """
    codes = ['000001', '000300']

    connPool = urllib3.PoolManager()

    url = 'http://pdfm.eastmoney.com/EM_UBG_PDTI_Fast/api/js?' \
          'rtntype=1&token=4f1862fc3b5e77c150a2b985b12db0fd&cb=' \
          'jQuery183010169580872648432_1535338664760&id={0}1&type=k&authorityType=ba&_=1535338665237'

    for code in codes:
        response = connPool.request('GET', url.replace('{0}', code), headers={'User-Agent': userAgent})

        # Skip the index when no response could be obtained.
        if response is None:
            continue

        # Strip the JSONP wrapper; each line is one day's quote record.
        hqStr = response.data.decode('UTF-8')[1:-1]
        hqLines = hqStr.split("\n")

        updateRequests = []
        for line in hqLines:
            if len(line) == 0:
                continue

            fields = line.split(',')

            # Volume is reported in lots of 100 shares.
            volume = int(fields[5]) * 100

            # An index never suspends, so status is always "trading".
            tradeStatus = '交易'

            # Normalize turnover to yuan.
            # BUGFIX: '万亿' is 1e12 (the original computed 1e7 * 1e4 = 1e11),
            # and the '亿'/'万' branches must multiply — the original did
            # `amount += amount * 1e8`, yielding amount * (1 + 1e8).
            if fields[6][-2:] == '万亿':
                amount = float(fields[6][0:-2]) * 1e12
            elif fields[6][-1] == '亿':
                amount = float(fields[6][0:-1]) * 1e8
            elif fields[6][-1] == '万':
                amount = float(fields[6][0:-1]) * 1e4
            else:
                amount = float(fields[6])

            # Document shape stored in MongoDB.
            doc = {
                'code': code + '.SH',       # index code with exchange suffix
                'time': fields[0],          # trade date
                'open': float(fields[1]),   # opening level
                'close': float(fields[2]),  # closing level
                'high': float(fields[3]),   # daily high
                'low': float(fields[4]),    # daily low
                'volume': volume,           # volume in shares
                'amount': amount,           # turnover in yuan
                'trade_status': tradeStatus
            }

            # BUGFIX: in the original, building `doc` and appending the
            # UpdateOne were mis-indented under the no-suffix `else`
            # branch, silently dropping every row whose turnover carried
            # an '亿'/'万' suffix.
            updateRequests.append(
                UpdateOne(
                    {'code': code + '.SH', 'time': fields[0]},
                    {'$set': doc},
                    upsert=True
                )
            )

        # Flush all pending upserts for this index.
        if len(updateRequests) > 0:
            updateResult = db.daily_k.bulk_write(updateRequests, ordered=False)
            print('code:{0}, modified:{1}, upserted:{2}'.format(code, updateResult.modified_count, updateResult.upserted_count))

def crawlFinanceReport():
    """
    Crawl earnings-report summaries (EPS, report date, announcement date)
    for every stock and upsert them into `finance_report`.
    """
    codes = getAllCodes()

    connPool = urllib3.PoolManager()

    url = 'http://dcfm.eastmoney.com//em_mutisvcexpandinterface/api/js/get?type=YJBB20_YJBB&' \
          'token=70f12f2f4f091e459a279469fe49eca5&st=reportdate&sr=-1&filter=(scode={0})' \
          '&p=1&ps=100&js={"pages":(tp),"data":%20(x)}'

    # Session cookie the endpoint requires; presumably tied to the token
    # above — TODO confirm it is still accepted.
    cookie = 'st_pvi=27990368714517; st_si=95186323355355; qgqp_b_id=ac96ba3c547b122e270c9072881bace0; ' \
             'st_asi=delete; EMSTtokenId=f2db722e612b8989fc01244aa0e6235c; st_sn=3; st_psi=20180830103200146-111000300841-0596698071'

    for code in codes:
        try:
            response = connPool.request('GET', url.replace('{0}', code),
                                        headers={
                                            'Cookie': cookie,
                                            'User-Agent': userAgent
                                        })
            result = json.loads(response.data.decode('UTF-8'))
        except Exception:
            # One bad response / malformed payload must not abort the
            # whole crawl; log it and continue with the next stock.
            traceback.print_exc()
            continue

        reports = result['data']
        updateRequests = []

        for report in reports:
            doc = {
                # Reporting period, e.g. '2018-06-30'.
                'report_date': report['reportdate'][0:10],
                # Announcement (publication) date.
                'announced_date': report['latestnoticedate'][0:10],
                # Basic earnings per share.
                'eps': report['basiceps'],
                'code': report['scode']
            }

            # Upsert keyed on (code, report period) so re-runs refresh in place.
            updateRequests.append(
                UpdateOne(
                    {'code': code, 'report_date': doc['report_date']},
                    {'$set': doc},
                    upsert=True
                )
            )

        if len(updateRequests) > 0:
            updateResult = db.finance_report.bulk_write(updateRequests, ordered=False)
            print("股票{0}, 财报更新{1}, 插入{2}".format(code, updateResult.modified_count, updateResult.upserted_count))


if __name__ == '__main__':
    # Dispatch on the first command-line argument to pick which dataset
    # to crawl.
    if len(sys.argv) <= 1:
        print("请选择获取的数据类型： k - 未复权的日K线数据, kba - 后复权的日K数据， fin - 财务， zs - 指数")
    elif sys.argv[1] == 'k':
        # Unadjusted daily K-line data.
        crawlKData('')
    elif sys.argv[1] == 'kba':
        # Backward-adjusted daily K-line data.
        crawlKData('ba')
    elif sys.argv[1] == 'fin':
        # Earnings-report data.
        crawlFinanceReport()
    elif sys.argv[1] == 'zs':
        # Index daily K-line data.
        crawlZSKdata()
    else:
        print("类型不对：k - 未复权的日K线数据, kba - 后复权的日K数据， fin - 财务， zs - 指数")