# -*- coding: utf-8 -*-
"""
数据抓取，完成策略需要的日K数据（不复权和后复权）以及财报的抓取，
并把抓取的结果存储到MongoDB中，数据库名是quant
包含了以下几个方法：
crawl_stock_list: 抓取沪深所有股票的代码和名称，并保存到stock中
crawl_single_page: 一个工具方法，抓取单页的股票列表数据
get_all_codes: 从数据库中获取所有的股票列表，如果找不到数据，则抓取
crawl_k_data: 抓取股票的日行情数据，支持抓取未复权和后复权两种类型的数据
crawl_zs_k_data: 抓取上证综指和沪深300的日K数据，指数没有复权的情况，所以只保存到daily_k中
crawl_finance_report: 抓取财报数据，主要关注EPS、公告日期、报告期
"""

import json
import sys
import traceback
import urllib3
from datetime import datetime
from pymongo import MongoClient, UpdateOne

db = MongoClient('mongodb://39.107.106.234:27017')['quant']

user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'


def crawl_stock_list():
    """
    抓取股票代码列表，并存储到MongoDB中
    """
 
    stock_codes = []
    update_requests = []

    # 分页抓取，直到获取所有的股票代码
    crawled_total = 0
    page = 1
    while 1:
        # 抓取当前页数据
        response = crawl_single_page(page)
        print(response)

        if response is None:
            break

        # 转化为JSON
        data = json.loads(response, 'UTF-8')

        # 解析获取所有股票代码
        stocks = data['data']
        for stock in stocks:
            fields = stock.split(',')
            print('code: %s, name: %s' % (fields[1], fields[2]))
            # 存储到MongoDB中的数据结构
            doc = {
                'code': fields[1],
                'name': fields[2],
                'update_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            }
            # 生成更新数据的请求
            update_requests.append(
                UpdateOne(
                    {'code': fields[1]},
                    {'$set': doc},
                    upsert=True))
            crawled_total += 1

        # 判断是否抓取到最后一页，如果已经到了最后一页，则结束股票列表的抓取
        total = data['recordsTotal']
        print('Crawled %d/%d' % (crawled_total, total))
        if crawled_total == total:
            break

        page += 1

    # 保存到数据库中
    if len(update_requests) > 0:
        update_result = db.stock.bulk_write(update_requests, ordered=False)
        print('Update stock collection, modified: %4d, upserted: %4d' %
              (update_result.modified_count, update_result.upserted_count))


def crawl_single_page(page):
    """
    抓取单页数据
    """
    url = 'http://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?cb=&type=CT&token=4f1862fc3b5e77c150a2b985b12db0fd&js=%7B%22data%22%3A%5B(x)%5D%2C%22recordsTotal%22%3A(tot)%2C%22recordsFiltered%22%3A(tot)%7D&cmd=C._A&sty=FCOIATC&st=(ChangePercent)&sr=-1&p={0}&ps=200'

    # url = 'http://www.baidu.com'

    try:
        # 创建连接池
        conn_pool = urllib3.PoolManager()
        response = conn_pool.request('GET', url.replace('{0}', str(page)), headers={'User-Agent': user_agent})

        return response.data.decode('UTF-8')

    except:
        traceback.print_exc()
        return None


def get_all_codes():
    """
    获取所有股票代码的列表
    """

    stock_cursor = db.stock.find(projection={'code': True, '_id': False})
    codes = [x['code'] for x in stock_cursor]
    retried_times = 0

    while len(codes) == 0 and retried_times < 10:
        crawl_stock_list()
        stock_cursor = db.stock.find(projection={'code': True, '_id': False})
        codes = [x['code'] for x in stock_cursor]
        retried_times += 1

    return codes


def crawl_k_data(authorityType):
    # 先获取所有的股票列表
    codes = get_all_codes()

    # 创建连接池
    conn_pool = urllib3.PoolManager()

    url = 'http://pdfm.eastmoney.com/EM_UBG_PDTI_Fast/api/js?rtntype=1&token=4f1862fc3b5e77c150a2b985b12db0fd&id={0}&type=k&authorityType={1}&cb='

    # 抓取所有股票
    for code in codes:
        response = None
        # 不同的股票设置不同的后缀
        stock_k_url = url
        if code[0:2] == '60':
            stock_k_url = url.replace('{0}', code + '1')
        elif code[0:2] in ['30', '00']:
            stock_k_url = url.replace('{0}', code + '2')

        stock_k_url = stock_k_url.replace('{1}', authorityType)
        response = conn_pool.request('GET', stock_k_url, headers={'User-Agent': user_agent})

        # 如果获取到Response，则跳过股票
        if response is None:
            continue

        # 解析抓取结果
        hq_str = response.data.decode('UTF-8')[1:-1]

        hq_lines = hq_str.split("\n")

        # 更新的requests
        update_requests = []
        # 获取每一个字段
        for line in hq_lines:
            if len(line) > 0:
                # 逗号分隔
                fields = line.split(',')

                volume = int(fields[5]) * 100

                if volume > 0:
                    # 成交大于0，证明是交易
                    trade_status = '交易'
                    # 成交额，转化为元
                    if fields[6][-1] in ['亿', '万']:
                        amount = float(fields[6][0:-1])
                        amount_unit = fields[6][-1]
                        if amount_unit == '亿':
                            amount *= amount * 1e8
                        elif amount_unit == '万':
                            amount *= amount * 1e4
                    else:
                        amount = float(fields[6])
                else:
                    # 成交量等于0 ，证明是停牌
                    amount = 0
                    trade_status = '停牌'

                # 构造保存用的document
                doc = {
                    # 股票代码
                    'code': code,
                    # 日期
                    'time': fields[0],
                    # 开盘价
                    'open': float(fields[1]),
                    # 收盘价
                    'close': float(fields[2]),
                    # 最高价
                    'high': float(fields[3]),
                    # 最低价
                    'low': float(fields[4]),
                    # 成交量，转化为股数
                    'volume': volume,
                    # 成交额
                    'amount': amount,
                    # 交易状态
                    'trade_status': trade_status
                }

                # 创建更新的请求
                update_requests.append(
                    UpdateOne(
                        {'code': code, 'time': fields[0]},
                        {'$set': doc},
                        upsert=True))

        # 分别保存到不同的行情库
        collection_name = 'daily_k'
        if authorityType == 'ba':
            collection_name = 'daily_k_ba'
        if len(update_requests) > 0:
            update_result = db[collection_name].bulk_write(update_requests, ordered=False)
            print('code: %s, modified: %4d, upserted: %4d' %
                  (code, update_result.modified_count, update_result.upserted_count))


def crawl_zs_k_data():
    # 先获取所有的股票列表
    codes = ['000001', '000300']

    # 创建连接池
    conn_pool = urllib3.PoolManager()

    url = 'http://pdfm.eastmoney.com/EM_UBG_PDTI_Fast/api/js?rtntype=1&token=4f1862fc3b5e77c150a2b985b12db0fd&cb=&id={0}1&type=k&iscr=false&_=1528087227767'

    # 抓取所有股票
    for code in codes:
        response = conn_pool.request('GET', url.replace('{0}', code), headers={'User-Agent': user_agent})

        # 如果获取到Response，则跳过股票
        if response is None:
            continue

        # 解析抓取结果
        hq_str = response.data.decode('UTF-8')[1:-1]

        hq_lines = hq_str.split("\n")

        # 更新的requests
        update_requests = []
        # 获取每一个字段
        for line in hq_lines:
            if len(line) > 0:
                # 逗号分隔
                fields = line.split(',')

                volume = int(fields[5]) * 100

                # 指数的状态一直为交易
                trade_status = '交易'
                # 成交额，转化为元
                if fields[6][-2:] == '万亿':
                	amount = float(fields[6][0:-2]) * 1e7 * 1e4
                elif fields[6][-1] in ['亿', '万']:
                    amount = float(fields[6][0:-1])
                    amount_unit = fields[6][-1]
                    if amount_unit == '亿':
                        amount *= amount * 1e8
                    elif amount_unit == '万':
                        amount *= amount * 1e4
                else:
                    amount = float(fields[6])

                # 构造保存用的document
                doc = {
                    # 股票代码
                    'code': code + '.SH',
                    # 日期
                    'time': fields[0],
                    # 开盘价
                    'open': float(fields[1]),
                    # 收盘价
                    'close': float(fields[2]),
                    # 最高价
                    'high': float(fields[3]),
                    # 最低价
                    'low': float(fields[4]),
                    # 成交量，转化为股数
                    'volume': volume,
                    # 成交额
                    'amount': amount,
                    # 交易状态
                    'trade_status': trade_status
                }

                # 创建更新的请求
                update_requests.append(
                    UpdateOne(
                        {'code': code + '.SH', 'time': fields[0]},
                        {'$set': doc},
                        upsert=True))

        # 保存到数据库
        if len(update_requests) > 0:
            update_result = db.daily_k.bulk_write(update_requests, ordered=False)
            print('code: %s, modified: %4d, upserted: %4d' %
                  (code, update_result.modified_count, update_result.upserted_count))


def crawl_finance_report():
    # 先获取所有的股票列表
    codes = get_all_codes()

    # 创建连接池
    conn_pool = urllib3.PoolManager()

    url = """http://dcfm.eastmoney.com//em_mutisvcexpandinterface/api/js/get?type=YJBB20_YJBB&token=70f12f2f4f091e459a279469fe49eca5&st=reportdate&sr=-1&filter=(scode={0})&p={page}&ps={pageSize}&js={"pages":(tp),"data":%20(x)}"""

    cookie = """emstat_bc_emcount=21446959091031597218; pgv_pvi=8471522926; st_pvi=95785429701209; _ga=GA1.2.700565749.1496634081; Hm_lvt_557fb74c38569c2da66471446bbaea3f=1499912514; _qddaz=QD.g2d11t.ydltyz.j61eq2em; ct=YTJNd7eYzkV_0WPJBmEs-FB0AGfyz7Z9G-Z1HbsPTxwV9TxpuvcB2fM1xoG5PhqgTI5KlrQZKFZReg3g3ltIwo8fMyzHhEzVjltYwjAigMTdZvdEHnU7QW2O-7u0dCkmtsFOBI4vbW1ELaZ9iUS9qPFAtIkL9M8GJTj8liRUgJY; ut=FobyicMgeV4t8TZ4Md7eLYClhCqi0wXPSu3ZyZ4h4Q8vWCyLMuChP80vhfidM2802fUv5AJEgl9ddudfTRqObGqQ47QN4oJS5hoWxdsHCY6lvJEeXDTNKWsdPhsfzg0i-ukMlT11XfPMIsBG9DzhW3xDAR3flNcqE5csB2rT3cfVPchlihFWHk-f3F1-lSsBjduc9_Ws_jjJEsi46xEai2mCVGd_O41yhPU3MWXl2_2QJU_ILgnzruwDvjeoQRtf8COKmiJCtE6hhy04RvSjmbzBVeZXqUhd; pi=4266045025913572%3bb4266045025913572%3b%e8%82%a1%e5%8f%8bZTLUIt%3bo97rhoY6b5AbF5jETm3t72EC9RGpIhrLsDj7myRgKyWSJmYrdl1WGaA9dMGpydaY4AptuI0ZgKDj6PCir1z%2bY1if6G0iITYI4Rv%2bPXy6H%2f4u7RgiD%2f2hCYAGnfitkw9HQXnqBETzflfUGnvGJysWiVyPlOp%2fZh4Hfe6NqssBxCqJUrGOCM06F7feAXC6Vapy%2fse0PT2a%3bVMsSChhqtxvtvecfLmv9FInLBANRLHpns2d%2bJGh272rIXhkWm%2bNK%2bXxkRKL2a0EgScqdtlcYN1QChVUWT7gmrH9py08FBPk2n5EQA9m9Zt5o2m%2bMuQhON2f66vlq%2bGk3Z66s%2brgCQhSPqoUPxluzSwBk7I9NNA%3d%3d; uidal=4266045025913572%e8%82%a1%e5%8f%8bZTLUIt; vtpst=|; em_hq_fls=old; emstat_ss_emcount=5_1505917025_902015979; st_si=83202211429810; em-quote-version=topspeed; showpr3guide=1; qgqp_b_id=367cbd71ad5c205f172815cdab571db9; hvlist=a-000858-2~a-000651-2~a-600000-1~a-300017-2~a-600020-1~a-600005-1~a-600004-1~a-162605-2~a-159901-2~a-600015-1~a-002364-2~a-600128-1~a-002357-2~a-002363-2~a-601106-1; HAList=a-sz-300059-%u4E1C%u65B9%u8D22%u5BCC%2Ca-sz-002607-%u4E9A%u590F%u6C7D%u8F66%2Ca-sh-603259-%u836F%u660E%u5EB7%u5FB7%2Ca-sz-000858-%u4E94%u7CAE%u6DB2%2Ca-sh-600165-%u65B0%u65E5%u6052%u529B%2Ca-sh-603013-%u4E9A%u666E%u80A1%u4EFD%2Ca-sz-002841-%u89C6%u6E90%u80A1%u4EFD%2Cf-0-399300-%u6CAA%u6DF1300%2Cf-0-000300-%u6CAA%u6DF1300%2Ca-sz-000651-%u683C%u529B%u7535%u5668%2Ca-sz-000735-%u7F57%u725B%u5C71"""
    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'

    for code in codes:
        response = conn_pool.request('GET', url.replace('{0}', code),
                                     headers={
                                         'Cookie': cookie,
                                         'User-Agent': user_agent})

        # 解析抓取结果
        result = json.loads(response.data.decode('UTF-8'), 'UTF-8')

        reports = result['data']

        update_requests = []
        for report in reports:
            doc = {
                # 报告期
                'report_date': report['reportdate'][0:10],
                # 公告日期
                'announced_date': report['latestnoticedate'][0:10],
                # 每股收益
                'eps': report['basiceps'],
                'code': code
            }

            update_requests.append(
                UpdateOne(
                    {'code': code, 'report_date': doc['report_date']},
                    {'$set': doc}, upsert=True))

        if len(update_requests) > 0:
            update_result = db.finance_report.bulk_write(update_requests, ordered=False)
            print('股票 %s, 财报，更新 %d, 插入 %d' %
                  (code, update_result.modified_count, update_result.upserted_count))


if __name__ == "__main__":
    if len(sys.argv) <= 1:
        print('请选择获取的数据类型: k - 未复权的日K线数据，kba - 后复权的日K数据，fin - 财务, zs - 指数')
    elif sys.argv[1] == 'k':
        crawl_k_data('')
    elif sys.argv[1] == 'kba':
        crawl_k_data('ba')
    elif sys.argv[1] == 'fin':
        crawl_finance_report()
    elif sys.argv[1] == 'zs':
    	crawl_zs_k_data()
    else:
        print('类型不对: k - 未复权的日K线数据，kba - 后复权的日K数据，fin - 财务')
