# Import the modules used below
import datetime
import random
import time
import urllib.request
import os
import json

# Root directory where fetched JSON responses are cached on disk.
temp_path = "operator_records/temp/dfcf/"
# When True, get_html() consults/updates the on-disk cache before hitting the network.
open_cache = True


def get_random_ip():
    """Install a global urllib opener that routes HTTP through a random proxy.

    NOTE(review): ip_list currently holds empty placeholders — fill in real
    proxy addresses ("host:port") before relying on this for anonymity.
    """
    ip_list = ['', '']
    # ProxyHandler requires a mapping of scheme -> proxy URL; the original
    # passed a set literal, which ProxyHandler rejects.
    proxy_support = urllib.request.ProxyHandler({'http': random.choice(ip_list)})
    # The opener must be captured and actually handed to install_opener;
    # the original discarded it and called install_opener() with no argument.
    opener = urllib.request.build_opener(proxy_support)
    urllib.request.install_opener(opener)


# Fetch a URL and cache the decoded body on disk.
def get_html(url, nkey, times=1):
    """Fetch *url* and return the response body as a string, with disk caching.

    Parameters:
        url:   URL string to fetch.
        nkey:  cache key; '/' separators become sub-directories under
               temp_path, and the final segment is the cache file name
               (an empty final segment disables caching for this call).
        times: current attempt number (internal; up to 3 attempts total).

    Returns the response body (str), a previously cached body, or '' if
    every attempt failed.
    """
    dirs = (temp_path + nkey).split('/')
    path = "/".join(dirs[:-1])
    key = dirs[-1]
    if not os.path.exists(path):
        os.makedirs(path)
    cache_file = path + '/' + key + ".json"
    if open_cache and key != '':
        # Narrowed from a bare except: only a missing/unreadable file or
        # corrupt JSON should trigger a re-fetch.
        try:
            with open(cache_file, 'r') as f:
                res = json.loads(f.read())
                if len(res) > 0:
                    return str(res)
        except (OSError, ValueError):
            print("重新获取")
    try:
        # Spoof a browser User-Agent (plus a session cookie) to avoid being blocked.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3775.400 QQBrowser/10.6.4208.400'
            ,
            'Cookie': 'device_id=149ad0ff1a18a076cc4f97ad25126e44; s=c5127gq314; __utmz=1.1676714459.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); bid=ca6326f2403e7fb1e93b1289a2f66c81_le9sigz0; Hm_lvt_1db88642e346389874251b5a1eded6e3=1676685220,1676789346; remember=1; xq_a_token=203b568df4cd33a20b9a7d3d074f15c043db6eb0; xqat=203b568df4cd33a20b9a7d3d074f15c043db6eb0; xq_id_token=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJ1aWQiOjY0OTgwMzI2MjAsImlzcyI6InVjIiwiZXhwIjoxNjc5MTkwOTA4LCJjdG0iOjE2NzY3ODk0MjgwNzMsImNpZCI6ImQ5ZDBuNEFadXAifQ.am6YSgvHaBi3uyAmgr3k8nERMRHmdA_yF1gq3LG_Hi0uqnDUiSCh4O7FYtU-Zom6SYiyLf4ifB86GjoFpPH0TTy4-2KRAjMww5_zW_Xo5OFIhQn3ZCPzt-3-nK8Q0M8Jmx5EKx-EFpc0Tocxmxa2_TTfSJHbBiO69p5CnN6k_ThYEPVrWdej-2lHAfQmuvzVhjwA303Wzg7g5Y7E5bw1Hc57UqUuKDLRbEj5qFwC_ASF1MpaFvbtImDIbECp3cMW0hnTHgxmYGYMGy7OT5wVcJxx2ssCWpXViqN1aVp7TsBiFOpT0GCic_OyGhtv6WqlRfjc1fPKkgfPQsfDEG74cw; xq_r_token=9ee1f22e73a2aeff0c3f4baae573ba9662ec288a; xq_is_login=1; u=6498032620; Hm_lpvt_1db88642e346389874251b5a1eded6e3=1676789437; __utma=1.783941525.1676714459.1676714459.1676789438.2; __utmc=1; acw_tc=2760779016767930066143123e1879249c577a6519e03892e435e4d7d8160a'
        }
        # Do NOT rebind `url`: the original reused the name for the Request
        # object, so a retry passed a Request where a URL string was expected.
        request = urllib.request.Request(url, headers=headers)
        html = urllib.request.urlopen(request).read().decode('utf-8')
        if open_cache and key != '':
            with open(cache_file, 'w') as f:
                f.write(json.dumps(html))
        return html
    except Exception as e:
        print(url)
        print(e)
        if times < 3:
            print('重试次数：' + str(times))
            time.sleep(3)
            # Return the retry's result; the original discarded it and then
            # hit an unbound `html` on the final `return`.
            return get_html(url, nkey, times + 1)
    # All attempts failed: return an empty body instead of raising
    # UnboundLocalError as the original did.
    return ''


def get_all_stock():
    """Fetch the full stock list from EastMoney's clist API.

    Returns a list of dicts with keys stkCode (code), stkName (name),
    market (exchange flag) and cjjg (latest price); an empty list is
    returned if the request or parsing fails.
    """
    stocks = []
    try:
        api_url = 'https://13.push2.eastmoney.com/api/qt/clist/get?cb=jQuery11240410073680447953_1663050144345&pn=1&pz=10000&po=1&np=1&ut=bd1d9ddb04089700cf9c27f6f7426281&fltt=2&invt=2&wbp2u=|0|0|0|web&fid=f3&fs=m:0+t:6,m:0+t:80,m:1+t:2,m:1+t:23,m:0+t:81+s:2048&fields=f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152&_=1663050144359'
        # Fetch (cached per day) and drop escaping backslashes.
        raw = get_html(api_url, get_td() + "get_all_stock/").replace("\\", "")
        # Strip the JSONP wrapper: keep only the payload between the parens.
        payload = raw[raw.index('(') + 1:raw.index(')')]
        for entry in json.loads(payload).get('data').get('diff'):
            stocks.append({
                "stkCode": str(entry.get('f12')),
                "stkName": str(entry.get('f14')),
                "market": str(entry.get('f13')),
                "cjjg": str(entry.get('f2')),
            })
    except Exception as err:
        print(err)
    return stocks


# Market prefix convention: Shanghai codes are prefixed with 0 and Shenzhen
# with 1 — e.g. "0000001" is the SSE Composite Index, "1000001" is Ping An Bank.
def get_daily(code, start='20000101', end='20500101'):
    """Fetch daily K-line data for *code* between *start* and *end* (YYYYMMDD).

    Each returned element is one raw kline row from the API with
    ",code,name,market" appended; an empty list is returned on failure.
    """
    rows = []
    try:
        api_url = ("http://push2his.eastmoney.com/api/qt/stock/kline/get?"
                   "cb=jQuery112406517043891833485_1663229199080"
                   "&fields1=f1%2Cf2%2Cf3%2Cf4%2Cf5%2Cf6"
                   "&fields2=f51%2Cf52%2Cf53%2Cf54%2Cf55%2Cf56%2Cf57%2Cf58%2Cf59%2Cf60%2Cf61"
                   "&ut=7eea3edcaed734bea9cbfc24409ed989"
                   "&klt=101"
                   "&fqt=1"
                   "&secid=" + code +
                   "&beg=" + start +
                   "&end=" + end +
                   "&_=1663229199268")
        raw = get_html(api_url, get_td() + "get_daily/" + code + "&" + start + "&" + end).replace("\\", "")
        # Strip the JSONP wrapper before parsing.
        payload = raw[raw.index('(') + 1:raw.index(')')]
        data = json.loads(payload).get('data')
        if data is not None:
            # The appended suffix is identical for every row, so build it once.
            suffix = "," + code + "," + data.get("name") + "," + str(data.get("market"))
            for line in data.get('klines'):
                rows.append(line + suffix)
    except Exception as err:
        print(err)
    return rows


# Market prefix convention: Shanghai codes are prefixed with 0 and Shenzhen
# with 1 — e.g. "0000001" is the SSE Composite Index, "1000001" is Ping An Bank.
def get_minutes(code, start='19900101', end=''):
    """Fetch today's minute-level trend data for *code*.

    *start* and *end* only take part in the cache key, not in the request.
    Each returned element is one raw trend row from the API with
    ",code,name,market" appended; an empty list is returned on failure.
    """
    rows = []
    try:
        api_url = ("http://push2.eastmoney.com/api/qt/stock/trends2/get?"
                   "cb=jQuery112406517043891833485_1663229199084"
                   "&fields1=f1%2Cf2%2Cf3%2Cf4%2Cf5%2Cf6%2Cf7%2Cf8%2Cf9%2Cf10%2Cf11%2Cf12%2Cf13"
                   "&fields2=f51%2Cf52%2Cf53%2Cf54%2Cf55%2Cf56%2Cf57%2Cf58"
                   "&ut=fa5fd1943c7b386f172d6893dbfba10b"
                   "&ndays=1"
                   "&iscr=0"
                   "&iscca=0"
                   "&secid=" + code +
                   "&_=1663229199353")
        cache_key = get_td(True, True) + "get_minutes/" + code + "&" + start + "&" + end
        raw = get_html(api_url, cache_key).replace("\\", "")
        # Strip the JSONP wrapper before parsing.
        payload = raw[raw.index('(') + 1:raw.index(')')]
        data = json.loads(payload).get('data')
        if data is not None:
            # The appended suffix is identical for every row, so build it once.
            suffix = "," + code + "," + data.get("name") + "," + str(data.get("market"))
            for line in data.get('trends'):
                rows.append(line + suffix)
    except Exception as err:
        print(err)
    return rows


def get_td(hasH=False, hasM=False, hasS=False):
    """Return today's date as 'YYYY-MM-DD', optionally suffixed with the
    current hour / minute / second (used by callers as cache keys).

    Components are zero-padded: the original used unpadded ints, so e.g.
    01:23 and 12:03 both produced the ambiguous suffix '123'. A single
    clock read replaces the original's separate datetime.now() and
    time.localtime() calls, which could straddle a tick (or midnight).
    """
    now = datetime.datetime.now()
    suffix = ((now.strftime('%H') if hasH else '')
              + (now.strftime('%M') if hasM else '')
              + (now.strftime('%S') if hasS else ''))
    return now.strftime('%Y-%m-%d') + suffix
