# Scrape 10jqka (同花顺) concept info — constituent stocks of the pre-categorized header concepts
import os
import sys
import zipfile
import static.globals as sg
import requests
import pymysql
import traceback
import undetected_chromedriver as uc
from lxml import etree
from selenium.common import NoSuchElementException
from selenium.webdriver.common.by import By
import time
from dbse.mysql.db_mysql_insert import addThsGNStocks
from dbse.mysql.db_mysql_insert import updateGnCompleted
from dbse.mysql.db_mysql_insert import deleteGnStocks
from dbse.mysql.db_entity_stock_ths_gn_stocks import StockThsGNStocks
from selenium.webdriver import ChromeOptions
from selenium.webdriver.support.ui import WebDriverWait
from sel.proxy.selenium_proxy_auth_plugin import create_proxy_auth_extension as cpae
from sel.proxy.selenium_proxy_util import initSelWireChrome
from selenium.webdriver.support import expected_conditions as EC
from cookie.selenium_cookie_util import addCookie
from file.readFile import read
from dbse.mysql.db_mysql_insert import getDataBase
from cache.redis_util import getConnection
from cache.redis_util import getLock
import datetime
import json




# "Process one page of data" — this comment belongs to sel_gn_stocks below; the imports were inserted here later.
from utils.DateUtil import isOpenToday
from utils.StringUtil import remove_chinese_keep_numbers, get_number


def sel_gn_stocks(browser, db, add_sql, gn):
    """Scrape the currently loaded page of a concept's constituent stocks.

    Parameters:
        browser: Selenium driver whose current page is the stock table.
        db:      open MySQL connection.
        add_sql: parameterized INSERT statement for stock_ths_gn_stocks.
        gn:      query row where gn[0] is the concept code.

    Returns False when the page reports no constituent data ("暂无成份股数据"),
    True after inserting every table row.
    """
    concept_code = gn[0]
    snapshot_date = datetime.date.today().strftime("%Y-%m-%d")
    page_html = browser.page_source
    if "暂无成份股数据" in page_html:
        return False

    tree = etree.fromstring(page_html, etree.HTMLParser())
    # Some page variants render two <tbody> elements, so scope the row query
    # under div#maincont to hit the data table only.
    rows = tree.xpath('//div[@id="maincont"]//tbody//tr')
    for row in rows:
        cells = row.getchildren()
        # NOTE(review): the rank stored is a constant 1 for every stock on
        # every page — confirm whether a per-row ranking was intended.
        record = StockThsGNStocks(
            cells[1].find('a').text,      # stock code
            cells[2].find('a').text,      # stock name
            snapshot_date,
            concept_code,
            1,                            # rank (always 1, see note above)
            get_number(cells[13].text),   # P/E ratio
            None,                         # P/B ratio not scraped here
            get_number(cells[12].text),   # total market cap
            get_number(cells[8].text),    # volume ratio
        )
        addThsGNStocks(db, add_sql, record)

    return True

# Fetch a proxy server address, valid for 1-5 minutes (private/dedicated proxy)
def get_proxy():
    """Fetch one short-lived (1-5 min) private proxy address from kdlapi.

    Returns:
        The proxy as a "host:port" string. The body is stripped so a
        trailing newline cannot break the later host/port split.

    Raises:
        requests.HTTPError on a non-2xx response (instead of silently
        returning an error page that the caller would try to split).
    """
    # SECURITY NOTE: secret_id/signature are hard-coded credentials in the
    # URL — move them to configuration or environment variables.
    url = "https://dps.kdlapi.com/api/getdps/?secret_id=ondbxg8msovv7ury9qbe&signature=zft63syjut8u3og5m9j5wung50mwah5f&num=1&format=text&sep=1"
    # Timeout prevents the whole scraping job from hanging forever on a
    # stalled connection to the proxy API.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    return response.text.strip()

def initChrome():
    """Launch an undetected-chromedriver Chrome session with cookies loaded.

    Returns a browser already pointed at https://q.10jqka.com.cn with the
    cookies from ./cookie/cookie.txt applied (generate that file first via
    get_cookie.py).
    """
    driver_path = r'C:\Program Files\Google\Chrome\Application\chromedriver.exe'
    useProxy = True
    if not useProxy:
        # Test mode: no proxy, suitable for scraping a single concept.
        browser = uc.Chrome(driver_executable_path=driver_path)
    else:
        # Production mode: route through the paid proxy (consumes IPs).
        proxy_address = get_proxy()
        print(f"===============使用代理:{proxy_address}")
        parts = proxy_address.split(":")
        # Build the proxy-auth extension from host, port and credentials.
        plugin_path = cpae(parts[0], parts[1], "d2231274096", "ela9v3e9")

        option = ChromeOptions()
        option.add_extension(plugin_path)
        for flag in ('--ignore-certificate-errors',
                     '--disable-web-security',
                     '--allow-running-insecure-content',
                     '--no-sandbox',
                     '--disable-dev-shm-usage'):
            option.add_argument(flag)
        browser = uc.Chrome(driver_executable_path=driver_path, options=option)

    browser.set_window_size(1366, 768)

    # Load the cookies exported beforehand by get_cookie.py.
    with open('./cookie/cookie.txt', 'r') as f:
        saved_cookies = json.load(f)

    # Visit the site first so the driver recognizes the domain; adding a
    # cookie whose domain does not match the current page raises an error.
    browser.get('https://q.10jqka.com.cn')
    time.sleep(2)  # give the page a moment to load
    for cookie in saved_cookies:
        # Only cookies for the matching domain can be attached.
        if cookie['domain'] == '.10jqka.com.cn':
            browser.add_cookie(cookie)

    # Refresh so the cookies take effect.
    browser.refresh()
    return browser


def startWork(profile,batch):
    """Scrape constituent stocks for one batch of THS concept codes.

    Parameters:
        profile: environment name used to resolve the Redis and MySQL
                 connections.
        batch:   batch number as a string ('1'-based); selects a window of
                 130 concepts, of which at most 20 uncompleted ones are
                 processed per call.

    On any exception the in-flight Redis lock and all resources are released
    and the function restarts itself after 5 seconds.
    NOTE(review): the restart is a recursive self-call with no depth limit —
    persistent failures keep deepening the call stack; a retry loop would be
    safer. TODO confirm before changing.
    """



    try:
        browser = None
        redisTemplate = None
        db = None
        redisTemplate = getConnection(profile)
        db = getDataBase(profile)
        currentGn = None                        # concept currently in flight (used to free its lock on error)
        ### Previous logic: fixed split into two batches
        # if (batch == '1'):
        #     query_sql = 'select t1.gn_code ,t2.gn_url from stock_scrapy_ths_gn t1 join (select distinct gn_code, gn_url from stock_ths_gn)as t2 on t1.gn_code = t2.gn_code where t1.completed = "0" order by t1.gn_code limit 20'
        # else:
        #     query_sql = 'select t1.gn_code ,t2.gn_url from stock_scrapy_ths_gn t1 join (select distinct gn_code, gn_url from stock_ths_gn)as t2 on t1.gn_code = t2.gn_code where t1.completed = "0" order by t1.gn_code desc limit 20'

        # Current logic: split the 300+ concepts into per-batch windows,
        # then repeatedly handle up to 20 uncompleted concepts of the window
        # per run.  (The original note said 100 concepts per batch but the
        # code uses 130 — TODO confirm the intended window size.)
        qOffset = 130 * (int(batch) -1)
        query_sql =  f'select * from (select t1.gn_code ,t2.gn_url,t1.completed from stock_scrapy_ths_gn t1 join (select distinct gn_code, gn_url from stock_ths_gn)as t2 on t1.gn_code = t2.gn_code order by t1.gn_code limit 130 offset {qOffset}) list where list.completed = "0" limit 20 '

        add_sql = 'INSERT INTO stock_ths_gn_stocks (stock_code, stock_name,create_time,gn_code,gn_rank,per,pbr,total_capital,vol_ratio) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)'
        cursor = db.cursor()
        cursor.execute(query_sql)
        gns = cursor.fetchall()
        if len(gns) == 0:
            print("=====================没有跟多需要处理的任务了=====================")
            return

        # Initialize the browser
        # browser = initChrome()
        browser = initSelWireChrome(useProxy=True,isHeadless=False,host='https://q.10jqka.com.cn',setTimeOut=False,ignorePic=False,redis=None)
        # Append the saved login cookies
        browser = addCookie(browser)
        # Shared explicit wait with a 10s timeout
        wait = WebDriverWait(browser,10)
        for gn in gns:
            currentGn = gn
            # Redis lock keyed by concept code, so parallel workers do not
            # process the same concept twice.
            isGetLock =  getLock(redisTemplate,gn[0])
            if(isGetLock):
                print(f'获取锁{gn[0]},成功')
            else:
                print(f'获取锁{gn[0]},失败.休息一下')
                time.sleep(10)
                continue

            # Start scraping this concept from its first page
            selContinue = True      # keep paging?
            page = 1                # current page number
            browser.get(gn[1])      # open the concept's first page
            # Remove previously stored stocks for this concept.
            # NOTE(review): passes gn[1] (the URL) while the insert/update
            # helpers use gn[0] (the concept code) — confirm deleteGnStocks
            # really keys on the URL.
            deleteGnStocks(db,gn[1])

            time.sleep(1)
            # Determine the total page count
            wait.until(EC.presence_of_element_located((By.ID,'m-page')))            # lazily rendered: wait for the div with id=m-page
            # When there is no pagination (10 rows or fewer) the page_info
            # span does not exist, so its absence means a single page.
            try:
                eleLastPage =  browser.find_element(By.XPATH,"//span[@class='page_info']")  # <span class="page_info">1/39</span>
                lastPage = eleLastPage.text.split("/")[1]       # "1/39" -> 39 is the last page number
            except NoSuchElementException :
                lastPage = 1
            print(f"lastPage is {lastPage}")
            while selContinue:
                # wait.until(EC.presence_of_element_located((By.ID,'m-page')))
                wait.until(EC.presence_of_element_located((By.ID,'maincont')))      ## wait until the maincont data table is rendered
                time.sleep(1)                                                       ## extra 1s so page_source is not still the previous page (moved here)
                selContinue = sel_gn_stocks(browser,db,add_sql,gn)                  ## scrape the current page
                page = page + 1
                # Trigger the next page (a single click)
                if page <= int(lastPage):
                    wait.until(EC.presence_of_element_located((By.ID,'m-page')))
                    try:
                        nextPage = browser.find_element(By.XPATH,f"//a[@page='{page}']")
                        nextPage.click()
                        # the 1s settle delay now lives next to the maincont wait above
                    except NoSuchElementException:
                        print(f"====================={gn[0]}=====================")
                        # TODO: write a DB record here so the missed pages can be compensated later
                        break
                    # nextPage = browser.find_element(By.XPATH,f"//a[@page='{page}']")
                    # nextPage.click()
                    # time.sleep(2)

                # Exit condition (lastPage may be a string, compare as int)
                if page > int(lastPage):
                    selContinue = False

            # Finished one concept: mark it completed
            updateGnCompleted(db,gn[0])
            # Release the Redis lock
            redisTemplate.delete(gn[0])
            # break  # for testing: scrape only one concept

        # Batch done: close the browser
        browser.quit()
    except Exception as e:
        print(f"异常信息: {e}")
        print("详细堆栈信息:")
        traceback.print_exc()  # print the full stack trace
        print("======================任务终止，等待重启======================")
        # Free the lock of the concept that was in flight, if any
        if currentGn!= None:
            redisTemplate.delete(currentGn[0])

        if(redisTemplate != None):
            redisTemplate.close()

        if(browser != None):
            browser.quit()

        if(db != None):
            db.close()

        # Back off briefly, then restart this batch (unbounded recursion — see docstring)
        time.sleep(5)
        startWork(profile,batch)
    finally:
        print("finally work run")


# Script entry point: python <script> <profile> <batch>
if __name__ == "__main__":
    # __main__ guard keeps the scrape from firing on a mere import.
    if len(sys.argv) < 3:
        sys.exit("usage: <script> <profile> <batch>")
    profile = sys.argv[1]   # environment profile (selects DB / Redis config)
    batch = sys.argv[2]     # batch number, '1'-based, passed through as a string
    sg.profile = profile    # publish the profile to the shared globals module
    # Only scrape on trading days.
    if isOpenToday():
        for index in range(20):
            startWork(profile, batch)
            # Report completion AFTER the batch ran; the original printed
            # this line before startWork, claiming completion prematurely.
            print(f"=================批次{index}完成，等待下一个批次===================")