import datetime
import queue
import re
import threading
import time
import requests
import json

from gdhs.gdhsjson import GdhsParseJsonTask
from database import DataBase

dbname = "../data.db"  # SQLite database file path, relative to the working directory
req_list = queue.Queue()  # thread-safe work queue of 6-digit stock codes shared by all crawl threads


def init_table():
    """Create the ``stock_gdhs`` (shareholder-count) table if it does not exist.

    Columns hold the stock name/code, record date, holder count, average
    holding per shareholder, total market cap and announcement date;
    ``(code, date)`` is the primary key, so re-inserts of the same snapshot
    can be ignored by the crawler.
    """
    db = DataBase(dbname)
    try:
        db.execute('''create table if not exists stock_gdhs
                          --'股东户数'
        (
            name  text not null, -- '股票名称'
            code varchar(6) not null, -- '股票代码'
            date text,-- '发布时间'
            gds text not null ,-- '股东数'
            avggds text not null ,-- '平均持股数'
            zsz text not null, -- '总市值',
            ggrq text not null, -- '公告日期'
            primary key(code,date)
        ) 
        ''')
    finally:
        # Always release the connection, even if table creation fails —
        # the original leaked the handle on an execute() error.
        db.close()


class Crawl(threading.Thread):  # crawler worker thread
    """Worker thread that drains stock codes from a shared queue, fetches
    shareholder-count records for each code and batch-inserts them into
    the ``stock_gdhs`` table."""

    def __init__(self, number, req_list):
        """number: worker id used only for logging; req_list: shared queue
        of 6-digit stock-code strings."""
        super(Crawl, self).__init__()
        self.number = number
        self.req_list = req_list
        # NOTE(review): the class object itself is stored, not an instance —
        # get_stock_by_interface is presumably a class/static method; confirm.
        self.gdhstask = GdhsParseJsonTask

    def run(self):
        """Consume the queue until empty, inserting one batch of rows per stock."""
        print('启动采集线程%d号' % self.number)
        db = DataBase(dbname)
        try:
            while True:
                # Use a non-blocking get instead of the qsize()>0 / get()
                # pair: with several workers the queue can drain between the
                # size check and the blocking get(), hanging a thread forever.
                try:
                    stock_num = self.req_list.get_nowait()
                except queue.Empty:
                    break
                datas = self.gdhstask.get_stock_by_interface(stock_num)
                sqls = []
                for data in datas:
                    # Row layout: SECURITY_CODE, SECURITY_NAME_ABBR, END_DATE,
                    # HOLDER_NUM, AVG_HOLD_NUM, TOTAL_MARKET_CAP, HOLD_NOTICE_DATE
                    # WARNING(review): SQL built via str.format — a stock name
                    # containing a quote would break the statement; switch to
                    # parameterized queries if DataBase supports them.
                    sql = '''insert or ignore into stock_gdhs(code,name, date,gds, avggds,zsz,ggrq) values('{}','{}','{}','{}','{}','{}','{}')'''\
                        .format(data[0], data[1], time.strftime('%Y%m%d',time.strptime(data[2], '%Y-%m-%d %H:%M:%S')),
                                data[3],"{:.2f}".format(data[4]),data[5],time.strftime('%Y%m%d',time.strptime(data[6], '%Y-%m-%d %H:%M:%S')))
                    sqls.append(sql)
                db.batch_execute(sqls)
        finally:
            # Always release the DB connection, even if a fetch/insert raised.
            db.close()


def get_all_stocks():
    """Fetch every SH/SZ A-share symbol from xueqiu's screener API and push
    each 6-digit stock code (leading digit 6, 3 or 0) onto ``req_list``.

    A first 1-row probe reads the total count, then pages of 500 rows are
    fetched until all symbols are seen.
    """
    url = 'https://xueqiu.com/service/v5/stock/screener/quote/list?page={}&size={}&order=desc&orderby=percent&order_by=percent&market=CN&type=sh_sz&_={}'
    headers = {
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "cache-control": "no-cache",
        "Connection": "keep-alive",
        "Host": "xueqiu.com",
        "Referer": "https://xueqiu.com/hq",
        "sec-ch-ua": '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"',
        "sec-ch-ua-mobile": '?0',
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36",
        "X-Requested-With": "XMLHttpRequest",
    }
    # WARNING(review): verify=False disables TLS certificate checking; kept
    # for compatibility but should be re-enabled if the site's cert allows.
    res = requests.get(url.format(1, 1, 11111), headers=headers, verify=False,
                       timeout=30)
    total = res.json()['data']['count']
    # Accept codes whose first digit is 6, 3 or 0 (SH/SZ A-share prefixes).
    code_pattern = re.compile(r"([630]\d{5})")
    # Pages are 1-based and hold up to 500 rows. The original
    # range(int(total/500)+1) started at page 0 and, when total is not a
    # multiple of 500, missed the final partial page — use ceil division
    # and iterate 1..page_count instead.
    page_count = -(-total // 500)
    for page in range(1, page_count + 1):
        res = requests.get(url.format(page, 500, 11111), headers=headers,
                           verify=False, timeout=30)
        data = res.json()
        payload = data.get('data') or {}
        for row in payload.get('list') or []:
            match = code_pattern.search(row['symbol'].strip('"'))
            if match is not None:
                req_list.put(match.group(1))





if __name__ == "__main__":
    get_all_stocks()
    init_table()
    req_thread = []
    for i in range(10):
        t = Crawl(i + 1, req_list)  # 创造线程
        t.start()
        req_thread.append(t)
    for t in req_thread:
        t.join()

    # 关闭浏览器
