# -*- coding:utf-8 -*-

import requests
from lxml import etree
from fake_useragent import UserAgent
from http import cookiejar
import json
from datetime import datetime
import time
import re
import csv
import tushare as ts


requests.packages.urllib3.disable_warnings()  # 忽略HTTPS安全警告


"""
金融大数据开放社区
https://tushare.pro/
账号:18241255868
密码:gkx55868
"""


class Tushare_Get():
    """Client for the Tushare Pro API (https://tushare.pro/).

    Fetches data sets through the ``tushare`` pro interface and appends the
    rows to local CSV files.  Each method opens its target CSV in append
    mode, so repeated runs accumulate rows; no header row is ever written
    (matching the original behaviour).
    """

    def __init__(self):
        # CookieJar instance to hold cookies across requests.
        # requests.utils.dict_from_cookiejar(html.cookies)  # cookies -> dict
        self.cookie = cookiejar.CookieJar()
        # NOTE(review): the original built fake_useragent.UserAgent(
        # use_cache_server=False) here, but the instance was never used
        # (the User-Agent below is hard-coded) and that keyword has been
        # removed in recent fake_useragent releases, so the call is dropped.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1 Safari/605.1.15'
        }
        # Tushare Pro API token (see module note: should not live in source).
        self.token = "82064c6ed67bf9ce4aa83e4ba8597164d6780f8e4bcfd3c4c22800fb"
        # Initialize the pro interface with the token.
        self.pro = ts.pro_api(self.token)

    def get_gupiao(self):
        """Fetch daily stock quotes and append them to ``600507.csv``.

        Queries daily quotes for 600507.SH between 2010-01-01 and
        2020-04-02 (unadjusted prices), prints each row, and appends it to
        the CSV.  Column meanings: open/high/low/close/pre_close are prices,
        change/pct_chg are the daily move, vol is volume in lots (手) and
        amount is turnover in thousands of CNY (千元).
        """
        fields = ["ts_code", "trade_date", "open", "high", "low", "close",
                  "pre_close", "change", "pct_chg", "vol", "amount"]
        df = self.pro.daily(ts_code='600507.SH', start_date='20100101', end_date='20200402')
        # Open the file once instead of re-opening it for every row
        # (the original re-opened inside the loop); output is identical.
        with open("600507.csv", 'a', encoding="utf-8-sig", newline='') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fields)
            for _, row in df.iterrows():
                dict_data = {name: row[name] for name in fields}
                print(str(dict_data))
                writer.writerow(dict_data)

    def ncov_num(self):
        """Fetch COVID-19 statistics for 武汉 and append them to a CSV.

        Queries mainland-China infection counts for the 武汉 area and
        appends one row per announcement to ``./COVID-19_省_市.csv`` with
        Chinese column names (date, area, parent area, level, cumulative /
        current confirmed and suspected, cured, dead).
        """
        # (CSV column name, tushare row column) pairs, in output order.
        field_map = [
            ("发布日期", "ann_date"),            # announcement date
            ("地区名称", "area_name"),           # area name
            ("上一级地区", "parent_name"),       # parent area
            ("级别", "level"),                   # administrative level
            ("累计确诊人数", "confirmed_num"),   # cumulative confirmed
            ("累计疑似人数", "suspected_num"),   # cumulative suspected
            ("现有确诊人数", "confirmed_num_now"),  # currently confirmed
            ("现有疑似人数", "suspected_num_now"),  # currently suspected
            ("累计治愈人数", "cured_num"),       # cumulative cured
            ("累计死亡人数", "dead_num"),        # cumulative deaths
        ]
        df = self.pro.ncov_num(area_name="武汉")
        # Open once, outside the per-row loop (original re-opened per row).
        with open("./COVID-19_省_市.csv", 'a', encoding="utf-8-sig", newline='') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=[name for name, _ in field_map])
            for _, row in df.iterrows():
                writer.writerow({name: row[col] for name, col in field_map})


if __name__ == '__main__':
    # Script entry point: build the client and pull the COVID-19 figures.
    client = Tushare_Get()
    # client.get_gupiao()  # daily stock quotes, disabled
    client.ncov_num()