import __init__
import sys
import os
import pandas as pd
import json
import urllib.request
from lxml import etree
import time
import execjs

import datetime as dt
import time
import random
import requests
import re

# Show every column/row when printing DataFrames (debug-friendly console output)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)

import cons as ct

class WencaiCookie:
    """Helper for computing the 10jqka anti-bot 'hexin-v' token.

    The token is produced by evaluating the site's own ``hexin.js`` with
    execjs; it must accompany requests to iwencai/10jqka endpoints.
    """

    # BUG FIX: the original defined this without `self` or @staticmethod,
    # so it worked only when called on the class and broke on instances.
    @staticmethod
    def getHeXinVByHttp():
        """Evaluate stock/hexin.js and return the result of its ``v()``.

        :return: the computed 'hexin-v' token string.
        """
        js_path = os.path.dirname(os.path.dirname(__file__)) + '/stock/hexin.js'
        # Read explicitly as UTF-8 so the JS source decodes identically on
        # every platform regardless of the locale default encoding.
        with open(js_path, 'r', encoding='utf-8') as f:
            jscontent = f.read()
        context = execjs.compile(jscontent)
        return context.call("v")

"""

>>>ths_bk(ths_code="gn")
>>>ths_bk(ths_code="thshy")
"""
def search(query_string, pagenum=1):
    """Query the iwencai natural-language stock screener.

    Posts *query_string* to the 10jqka robot-data endpoint and returns the
    raw list of result records (one dict per stock) for page *pagenum*.

    :param query_string: natural-language screening condition (Chinese).
    :param pagenum: 1-based result page to fetch; 50 records per page.
    :return: list of dicts extracted from the JSON response.
    :raises requests.HTTPError: if the endpoint answers with an error status.
    """
    headers = {
        "Accept": "application/json,text/javascript,*/*;q=0.01",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.8",
        'Connection': 'keep-alive',
        'Content-Type': "application/x-www-form-urlencoded; charset=UTF-8",
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
        'X-Requested-With': "XMLHttpRequest"
    }
    # The endpoint rejects requests without a freshly computed hexin-v token.
    headers['hexin-v'] = WencaiCookie.getHeXinVByHttp()
    payload = {
        "question": query_string,
        "page": pagenum,
        "perpage": 50,
        "log_info": '{"input_type": "typewrite"}',
        "source": "Ths_iwencai_Xuangu",
        "version": 2.0,
        "secondary_intent": "",
        "query_area": "",
        "block_list": "",
        "add_info": '{"urp": {"scene": 1, "company": 1, "business": 1}, "contentType": "json", "searchInfo": true}'
    }

    # BUG FIX: the original call had no timeout (could hang forever) and no
    # status check (HTTP errors surfaced as a confusing KeyError below).
    r = requests.post(url='http://x.10jqka.com.cn/unifiedwap/unified-wap/v2/result/get-robot-data',
                      data=payload, headers=headers, timeout=30)
    r.raise_for_status()
    result = r.json()['data']['answer'][0]['txt'][0]['content']['components'][0]['data']['datas']
    return result
def searchMain(query_string, num=50):  # num must not exceed 100 (API cap)
    """Fetch up to *num* result pages for *query_string* and concatenate them.

    Pages are requested one at a time with a randomized delay to avoid rate
    limiting; fetching stops early at the first empty page.

    :param query_string: natural-language condition forwarded to search().
    :param num: maximum number of pages to fetch (the API caps this at 100).
    :return: pandas.DataFrame with all fetched records (possibly empty).
    """
    frames = []
    # BUG FIX: the original iterated range(num), i.e. starting at page 0,
    # while search() pages are 1-based (its default pagenum is 1).
    for page in range(1, num + 1):
        pause = random.uniform(1.5, 3)
        if pause > 2.5:
            time.sleep(5)  # occasional longer back-off to look less bot-like
        time.sleep(pause)

        ct._write_console()
        records = search(query_string, page)
        df = pd.DataFrame.from_dict(records)
        if df.empty:
            break
        frames.append(df)

    # BUG FIX: DataFrame.append was deprecated and removed in pandas 2.0;
    # collect frames and concatenate once (also avoids quadratic copying).
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames, ignore_index=True)
#--------------------------------------------------
"""
转移到ths_bk.py
"""
def ths_bk(ths_code="gn"):
    #>>>ths_bk(ths_code="gn")
    # !/usr/bin/env python3
    # -*- coding: utf-8 -*-
    """
    gn
    thshy :http://q.10jqka.com.cn/thshy/ #同花顺行业
    Created on Fri Nov 17 19:41:44 2017

    @author: Mr.ZeroW

    同花顺板块成分股
    """

    # 首先不同板块地址不同 http://q.10jqka.com.cn/gn/detail/order/desc/page/1/ajax/1/code/300018
    # 需要高出不同板块页数有多少，才能得出地址


    header = ths_header()
    a = requests.get("http://q.10jqka.com.cn/{}/".format(ths_code), headers=header)
    html = etree.HTML(a.text)

    gnbk = html.xpath('/html/body/div[2]/div[1]/div//div//div//a')
    thsgnbk = []
    for i in range(len(gnbk)):
        thsgnbk.append((gnbk[i].text))
    print(thsgnbk)
    # 板块代码
    bkcode = html.xpath('/html/body/div[2]/div[1]/div//div//div//a/@href')
    bkcode = list(map(lambda x: x.split('/')[-2], bkcode))
    data = {'Name': thsgnbk}

    # 存储
    gnbk = pd.DataFrame(data, index=bkcode)
    gnbk.to_csv('gnbk.csv')

    print('板块名称以及代码已爬取，存储文件名：gnbk.csv')
    # 导入板块名称和代码
    data = pd.read_csv('gnbk.csv')
    # 建立数据框，四列【板块id, 板块name, 成分股id, 成分股name】
    start = time.time()
    temp = pd.DataFrame()
    for i in range(len(data)):
        bk_code = str(data.iloc[i, 0])
        name = str(data.iloc[i, 1])
        _df = _md(bk_code, name)
        temp = pd.concat([temp, _df])
        time.sleep(random.randint(15, 20))
        if i%10==1:#临时保存
            temp.to_csv("筛选数据_{}.txt".format(ths_code), encoding="utf-8")

    end = time.time()
    print('爬取结束！！\n开始时间：%s\n结束时间：%s\n' % (time.ctime(end), time.ctime(start)))
    temp.to_csv("筛选数据_{}.txt".format(ths_code), encoding="utf-8")
def ths_header(v=""):
        v = WencaiCookie.getHeXinVByHttp()
        header = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Cookie': 'Hm_lvt_78c58f01938e4d85eaf619eae71b4ed1=1631512319; v={}'.format(v),
            'Host': 'q.10jqka.com.cn',
            'Pragma': 'no-cache',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36  ',
        }
        return header
def _md(bk_code="307408",bk_name=""):
        s_id = []
        s_name = []
        url = 'http://q.10jqka.com.cn/gn/detail/code/' + bk_code + '/'
        header = ths_header()
        a = requests.get(url, headers=header)
        # 得出板块成分股有多少页
        html = etree.HTML(a.text)
        result = html.xpath('//*[@id="m-page"]/span/text()')
        print(result)
        if len(result)==0:
            return None
        try:
            page = int(result[0].split('/')[-1])
            for j in range(page):
                page_n = str(j + 1)
                #             print(page_n)
                curl = 'http://q.10jqka.com.cn/gn/detail/order/desc/page/' + page_n + '/ajax/1/code/' + bk_code
                if j % 3 == 1:
                    header = ths_header()
                a = requests.get(curl, headers=header)
                #->print(curl)
                html = etree.HTML(a.text)
                # 成分股代码
                stock_code = html.xpath('/html/body/table/tbody/tr/td[2]/a/text()')
                #             print(stock_code)
                # 成分股名称
                stock_name = html.xpath('/html/body/table/tbody/tr/td[3]/a/text()')
                #           print(stock_name)
                s_id.append(stock_code)
                s_name.append(stock_name)
            #将纬打击
            example = s_id
            example1 = [token for st in example for token in st]
            print(example1)
            example = s_name
            example2 = [token for st in example for token in st]
            print(example2)
            df_=pd.DataFrame({"S_ID":example1,"S_NAME":example2})
            df_["bk_id"]=bk_code
            df_["bk_name"] = bk_name
            print(df_)
            df_.to_csv("筛选数据_temp.txt", encoding="utf-8")
            return df_
        except:

            raise ("error")

if __name__ == '__main__':
    # Ad-hoc driver. Earlier experiments are kept below, commented out,
    # as a record of queries that were previously run against iwencai.
    # search.search("价格大于250天均线且换手率大于3", 100)

    # df=searchMain("20210609价格大于250天均线且换手率大于3")
    # df=searchMain('所属行业关注度排名前10',20)
    # print(df)
    """
    #保存到other/11中->后续读取等转到data2analyse。mg中
    # path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
    # path = os.path.join(path, "db", "tushare_his", "other", '{}.json'.format("11"))
    # print("->开始保存到other")
    # df.to_csv("{}".format(path), encoding="utf-8")
    """

    # from data2analyse import mg
    #mg.test()

    # name="人气概念板块排名及行业"




    # Column-filtering experiment: select the 5/30/120-day volume columns
    # from a searchMain result and rename them — kept for reference.
    # name="成交量，5日成交量，30日成交量，120日成交量"
    # df = searchMain(name, 2)
    #
    # a=df.columns
    # print(a)
    # choose_begin_name = "区间成交量"
    # cjl="成交量"
    # list_i = []
    # for i in a:
    #     m = re.search('^{}'.format(choose_begin_name), i)
    #     if m == None:
    #         pass
    #     else:
    #         list_i.append(i)
    # sorted_id = sorted(range(len(list_i)), key=lambda k: list_i[k], reverse=False)
    # print('元素索引序列：', sorted_id)
    # data=df[[list_i[sorted_id[0]], list_i[sorted_id[1]], list_i[sorted_id[2]],'code',"股票代码","股票简称"]]
    # data.rename(columns={list_i[sorted_id[0]]:"120日",
    #                      list_i[sorted_id[1]]:"30日",
    #                      list_i[sorted_id[2]]: "5日",}, inplace=True)
    # print(data)
    # Smoke-test the token generator, then crawl the concept-board list.
    print(WencaiCookie.getHeXinVByHttp())
    ths_bk()








