# 爬取同花顺概念信息_ 头部已分类信息 (crawl Tonghuashun/10jqka concept data — pre-classified header section)
import os
import sys
import zipfile
import requests
import pymysql
import traceback
import undetected_chromedriver as uc
from lxml import etree
from selenium.common import NoSuchElementException, TimeoutException
from selenium.webdriver.common.by import By
import time

from cache.redis_util import getConnection
from dbse.mysql.db_mysql_insert import addThsGNStocks
from dbse.mysql.db_mysql_insert import addStockThsSelException
from dbse.mysql.db_mysql_insert import addStockKpi
from dbse.mysql.db_mysql_insert import getDataBase
from dbse.mysql.db_mysql_insert import updateGnCompleted
from proxy.selenium_proxy_util import initChrome
from dbse.mysql.db_mysql_insert import deleteGnStocks
from dbse.mysql.db_entity_stock_ths_gn_stocks import StockThsGNStocks
from dbse.mysql.db_entity_stock_kpi_day import StockKpiDay
from dbse.mysql.db_entity_stock_ths_exception import StockThsSelException
from selenium.webdriver import ChromeOptions
from selenium.webdriver.support.ui import WebDriverWait
from sel.proxy.selenium_proxy_auth_plugin import create_proxy_auth_extension as cpae
from utils.StringUtil import simpleReplace
from utils.DateUtil import getTodayStr
from selenium.webdriver.support import expected_conditions as EC
from file.readFile import read
import datetime
import json
from sel.proxy.selenium_proxy_util import initSelWireChrome
from mq.kafka.DfcfStockKafkaConsumer import DfcfStockKafkaConsumer

def addException(db, stockCode):
    """Persist a type-1 selection exception for *stockCode*, stamped with today's date."""
    insert_sql = 'INSERT INTO stock_ths_sel_exception (biz_code,sel_type,sta_date) VALUES (%s,%s,%s)'
    record = StockThsSelException(stockCode, '1', getTodayStr())
    addStockThsSelException(db, insert_sql, record)

# Process one page of concept-member stocks and persist each row.
def sel_gn_stocks(browser, db, add_sql, gn):
    """Parse the current browser page for the concept *gn* and insert one
    StockThsGNStocks row per table row via *add_sql*.

    gn: sequence whose first element is the concept code.
    """
    concept_code = gn[0]                                        # concept code
    sta_date = datetime.date.today().strftime("%Y-%m-%d")       # capture date
    page = etree.fromstring(browser.page_source, etree.HTMLParser())
    # NOTE: some page variants carry two <tbody> elements, so filter through
    # div id="maincont" to select the correct stock table.
    rows = page.xpath('//div[@id="maincont"]//tbody//tr')
    for row in rows:
        cells = row.getchildren()
        # cells[1] holds the stock-code link, cells[2] the stock-name link
        record = StockThsGNStocks(
            cells[1].find('a').text,
            cells[2].find('a').text,
            sta_date,
            concept_code,
        )
        addThsGNStocks(db, add_sql, record)


def initKafkaConsumer(browser, db, redis):
    """Create, subscribe, and return the Kafka consumer driving the crawl.

    The browser/db/redis handles are passed through to the consumer, which
    uses them while processing messages.
    """
    # Kafka consumer configuration (broker address, group, auto-commit).
    consumer_config = {
        'bootstrap.servers': '192.168.0.111:9092',  # Kafka broker address
        'group.id': 'python-consumer-group-1',      # consumer group ID
        'enable.auto.commit': True,                 # auto-commit offsets
    }

    # Topic(s) to subscribe to.
    topic_list = ['dfcf_stock_topic_tecent']

    consumer = DfcfStockKafkaConsumer(consumer_config, topic_list, browser, db, 0, redis)
    consumer.subscribe_to_topics()
    return consumer

# 通过代理跑的任务超过4600 条 6个批次逐步停止
# 因为部分任务跑的快 offset已经到了end ，异常件重放后会开启新代理去跑（每个私密代理只跑几个任务） 不划算
def stopWork(db):
    """Return True when more than 4600 rows were already collected today.

    db: an open pymysql connection.
    """
    today = datetime.date.today().strftime("%Y-%m-%d")          # today's date string
    # BUG FIX: the date used to be interpolated unquoted into the SQL text
    # ("... sta_date =2025-01-01"), which MySQL evaluates as integer
    # subtraction, so the comparison never matched a date column. Use a
    # parameterized query so the driver quotes the value correctly.
    query_sql = 'select count(*) from stock_dfcf_kpi_day t1 where t1.sta_date = %s'
    cursor = db.cursor()
    try:
        cursor.execute(query_sql, (today,))
        result = cursor.fetchone()
    finally:
        cursor.close()                                          # release the cursor even on error
    print(f'count is {result[0]}')
    return result[0] > 4600


def work(profile, error):
    """Run one crawl session: open db/browser/redis/kafka, consume messages,
    and restart or exit on failure.

    profile: environment profile name passed to the resource factories.
    error:   running failure counter; after more than 5 failures the process exits.
    """
    # Pre-declare every resource so the except-block cleanup can test them even
    # when an early initialization step raises (otherwise: UnboundLocalError).
    browser = None
    redis = None
    consumer = None
    db = None
    try:
        db = getDataBase(profile)
        if stopWork(db):
            print('=================当天任务已完成=================')
            return

        browser = initSelWireChrome(useProxy=False, isHeadless=True, host='https://quote.eastmoney.com', setTimeOut=True, ignorePic=True)
        redis = getConnection(profile)
        consumer = initKafkaConsumer(browser, db, redis)
        restart = consumer.consume_messages()
        if restart:
            # BUG FIX: release ALL resources before restarting — the old code
            # leaked the db and redis connections here, and recursed with the
            # global `pProfile` (only bound under __main__) instead of the
            # `profile` parameter, raising NameError when imported as a module.
            if browser is not None:
                browser.quit()
            consumer.close()
            db.close()
            if redis is not None:
                redis.close()
            work(profile, error)

    except Exception as e:
        error = error + 1
        print('========发生异常，重启任务=======')
        print(f"异常信息: {e}")
        print("详细堆栈信息:")
        traceback.print_exc()  # full stack trace for diagnosis

        if browser is not None:
            browser.quit()                      # release the browser process

        if db is not None:
            db.close()                          # release the db connection

        if consumer is not None:
            consumer.close()                    # release kafka resources

        if redis is not None:
            redis.close()                       # release the redis connection
        if error > 5:
            print("===========异常次数过多,重新拉起进程==========")
            sys.exit()
        else:
            print("=================重启chrome================")
            # BUG FIX: restart with the `profile` parameter, not the
            # script-only global `pProfile`.
            work(profile, error)

if __name__ == "__main__":
    pProfile = sys.argv[1:][0]
    work(pProfile,0)









