import json
import re
import sys
import time

import execjs
import requests
from bs4 import BeautifulSoup
import csv

# Browser-like headers for the initial HTML request to the concept-detail
# landing page on q.10jqka.com.cn. Spoofing a real Firefox User-Agent (plus
# Referer/Accept headers) helps avoid the site's basic bot filtering.
# The Cookie line is intentionally commented out: the "v" token it contains
# expires, so it is regenerated per-request elsewhere in this script.
headers = {
    "Host": "q.10jqka.com.cn",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:95.0) Gecko/20100101 Firefox/95.0",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
    "Accept-Encoding": "gzip, deflate",
    "Referer": "http://q.10jqka.com.cn/gn/",
    "Connection": "keep-alive",
    # "Cookie": "Hm_lvt_78c58f01938e4d85eaf619eae71b4ed1=1639551899; v=A9EFgnPdVFnLYrhNtQSqJ8Sp4NxrPkWw77LpxLNmzRi3Wv8M-45VgH8C-ZhA; log=; Hm_lpvt_78c58f01938e4d85eaf619eae71b4ed1=1639552294; historystock=300603; spversion=20130314; searchGuide=sg",
    "Upgrade-Insecure-Requests": "1",
    "Pragma": "no-cache",
    "Cache-Control": "no-cache"
}

# Headers for the paginated ajax requests. The 'cookie' value is left empty
# here and overwritten before every request with a fresh encrypted "v" token
# (see the execjs context below) — the server rejects requests without it.
headers2 = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36',
    'cookie': ''
}

# Load and compile the site's AES script once at module import. Calling
# context.call("v") later yields the encrypted "v" cookie value that
# q.10jqka.com.cn requires on its ajax endpoints.
# Fix: pin the file encoding — the original relied on the platform default
# (e.g. cp936 on a Chinese-locale Windows), which can fail on non-ASCII bytes.
with open('./aes.min.js', 'r', encoding='utf-8') as f:
    jscontent = f.read()
context = execjs.compile(jscontent)

def get_one_gianian(code):
    """Collect every stock code listed under one 10jqka concept sector.

    First fetches the concept-detail landing page to read the total page
    count from the pager span, then walks each ajax page and gathers the
    numeric stock codes from the anchors that link to stockpage.10jqka.com.cn.

    Args:
        code: Concept sector id as a string, e.g. "305376".

    Returns:
        List of stock-code strings (empty if nothing matched).
    """
    url = 'http://q.10jqka.com.cn/gn/detail/code/{}/'.format(code)
    res = requests.get(url, timeout=30, headers=headers).text
    soup = BeautifulSoup(res, 'lxml')

    # Pager looks like: <span class="page_info">1/4</span>
    page_info = soup.findAll('span', class_="page_info")
    if not page_info:
        # Single-page sectors render no pager span at all.
        total_page = 1
    else:
        # "1/4" -> 4
        total_page = int(page_info[0].text.split('/')[-1])

    # Fix: escape the dots and drop the stray trailing "/*" — the original
    # pattern "http://stockpage.10jqka.com.cn/*" treated each dot as a
    # wildcard, matching more loosely than intended.
    stock_href = re.compile(r"http://stockpage\.10jqka\.com\.cn/")

    code_list = []
    for page in range(1, total_page + 1):
        # The server validates an encrypted "v" cookie produced by aes.min.js;
        # regenerate it before every request so pagination is not rejected.
        headers2['cookie'] = 'v={}'.format(context.call("v"))
        page_url = ('http://q.10jqka.com.cn/gn/detail/field/264648/order/desc'
                    '/page/{}/ajax/1/code/{}'.format(page, code))
        new_table = requests.get(page_url, timeout=10, headers=headers2).text
        soup = BeautifulSoup(new_table, 'lxml')
        new_content = soup.findAll('a', target="_blank", href=stock_href)
        for nc in new_content:
            # Keep only anchors whose text is a pure numeric stock code
            # (skips the company-name links pointing at the same pages).
            if nc.text.isdigit():
                code_list.append(nc.text)
    print(len(code_list))
    return code_list

if __name__ == '__main__':
    # Demo run against concept sector 305376.
    # Fix: the original body line was indented with a TAB while the rest of
    # the file uses 4-space indents; normalized to spaces.
    get_one_gianian("305376")

# print(page_info)
# <span class="page_info">1/4</span>
# <td><a href="http://stockpage.10jqka.com.cn/600708/" target="_blank">600708</a></td>
