# Scrape 10jqka (Tonghuashun) concept-board info — entries already categorized on the header pages
import os
import sys

import pymysql
import undetected_chromedriver as uc
from lxml import etree
from selenium.webdriver.common.by import By
import time
from dbse.mysql.db_mysql_insert import addThsGN
from dbse.mysql.db_entity_stock_ths_gn import StockThsGN
from selenium.webdriver import ChromeOptions
from file.readFile import read
import datetime
from proxy.selenium_proxy_util import initChrome, initSelWireChrome
from dbse.mysql.db_mysql_insert import getDataBase
import json
import static.globals as sg
from utils.DateUtil import isOpenToday


def sel_gn_history(browser, db, sql, page):
    """Scrape the concept rows shown on the browser's current page and persist each one.

    browser -- Selenium driver already positioned on a 10jqka concept list page
    db      -- database handle consumed by addThsGN
    sql     -- parameterized INSERT statement for the stock_ths_gn table
    page    -- current page number (kept for interface compatibility; unused here)
    """
    content = browser.page_source
    html = etree.fromstring(content, etree.HTMLParser())
    rows = html.xpath('//tbody//tr')      # one <tr> per concept entry under tbody
    for index, tr in enumerate(rows):
        tds = tr.getchildren()
        # Column layout: tds[0] = create time, tds[1] = <a> with concept name/url,
        # tds[2] = concept description (may or may not be wrapped in an <a>).
        # The concept code is the second-to-last path segment of the link URL.
        gnCode = tds[1].find('a').get('href').split("/")[-2]
        print(f"====index:{index}")

        descLink = tds[2].find('a')
        entity = StockThsGN(gnCode,
                            tds[1].find('a').text,
                            tds[1].find('a').get('href'),
                            tds[0].text,
                            '2',  # gn_type '2' marks rows scraped from this listing
                            tds[2].text if descLink is None else descLink.text)
        addThsGN(db, sql, entity)

pProfile = sys.argv[1]      # environment profile: dev (development) / prod (production)
sg.profile = pProfile
if isOpenToday():
    # Initialize the browser (selenium-wire Chrome: proxied, headless, images off).
    # browser = initChrome(False,False,'https://q.10jqka.com.cn')
    browser = initSelWireChrome(useProxy=True, isHeadless=True,
                                host='https://q.10jqka.com.cn',
                                setTimeOut=False, ignorePic=True, redis=None)

    # Load cookies previously produced by get_cookie.py into cookie.txt.
    # cookie_path = f'{os.environ.get("PYTHONPATH").split(";")[0]}/cookie/cookie.txt'
    with open('./cookie/cookie.txt', 'r', encoding='utf-8') as f:
        cookies = json.load(f)

    # Visit the home page first so the browser is on the target domain;
    # add_cookie raises if the cookie's domain does not match the current page.
    browser.get('https://q.10jqka.com.cn')
    for cookie in cookies:
        if cookie['domain'] == '.10jqka.com.cn':
            browser.add_cookie(cookie)
    # Refresh so the injected cookies take effect.
    browser.refresh()

    # Start scraping from the concept index page.
    print('====selenium work begin====')
    browser.get('https://q.10jqka.com.cn/gn')
    time.sleep(2)

    # Total page count, e.g. <span class="page_info">1/39</span> -> 39.
    eleLastPage = browser.find_element(By.XPATH, "//span[@class='page_info']")
    lastPage = int(eleLastPage.text.split("/")[1])

    # Database handle + parameterized insert for the scraped rows.
    db = getDataBase(pProfile)      # resolve database from the profile
    sql = 'INSERT INTO stock_ths_gn (gn_code,gn_name,gn_url,create_time,gn_type,gn_desc) VALUES (%s, %s,%s,%s,%s,%s)'

    # Walk every page; pages beyond ~5 require the login cookies loaded above.
    for page in range(1, lastPage + 1):
        print(f"正在抓取{page}页")
        sel_gn_history(browser, db, sql, page)

        # Click through to the next page unless we just scraped the last one.
        if page < lastPage:
            nextPage = browser.find_element(By.XPATH, f"//a[@page='{page + 1}']")
            nextPage.click()
            time.sleep(2)  # wait for the next page to load

else:
    print("==================Stock is not open today==================")


