# from env_check import install_requirements
# install_requirements()
import queue
import math
import json
import math
import random
import re
import time
from datetime import datetime
import threading
import copy
import hashlib
from loguru import logger
from scrapy import Selector
from bs4 import BeautifulSoup, NavigableString, Tag, Comment
# import requests
import mysql.connector
from requests.utils import cookiejar_from_dict
from datetime import datetime, timedelta
import json
import re
from getcookie import get_cookie
from redis import StrictRedis
from loguru import logger
from curl_cffi import requests
from fake_useragent import UserAgent
from get_proxy import get_random_proxy
from application import get_application

# Random User-Agent generator used for every outgoing request.
lua = UserAgent()
# NOTE(review): Redis credentials are hard-coded here — consider moving them
# to an environment variable or config file.
REDIS_URL = f'redis://r-2zer2v9lrdl75a694s:tianmai_2018@r-2zer2v9lrdl75a694spd.redis.rds.aliyuncs.com:6379/14'
# Shared Redis client; used to stash failed tasks for later replay.
conn_redis = StrictRedis.from_url(REDIS_URL, encoding='utf-8', decode_responses=True)


# Database connection settings
# Old local development config (kept for reference):
# db_config = {
#     'host': '127.0.0.1',  # MySQL server address
#     'user': 'root',  # MySQL user name
#     'password': '1234',  # MySQL password
#     'database': 'tender',  # MySQL database name
# }

# NOTE(review): production credentials are hard-coded — consider moving them
# to an environment variable or config file.
db_config = {
    'host': 'pc-2ze9oh2diu5e5firh.rwlb.rds.aliyuncs.com',  # MySQL server address
    'port': 3306,
    'user': 'data_collection',  # MySQL user name
    'password': 'CRNabzFQ2H',  # MySQL password
    'database': 'internal_collection',  # MySQL database name
}


# Connect to the MySQL database
def get_db_connection():
    """Open and return a fresh MySQL connection built from ``db_config``."""
    connection = mysql.connector.connect(**db_config)
    return connection


# Insert a record into MySQL
def insert_data_to_mysql(data, conn, cursor):
    """Insert one row into ``machine_tool_site``.

    Args:
        data: column-name -> value mapping for the new row; expected to
              contain a 'product_name' key (used for logging — TODO confirm
              all callers provide it).
        conn: open MySQL connection (committed on success).
        cursor: cursor created from ``conn``.

    Errors are logged, not raised, so a single bad row does not stop a run.
    """
    try:
        keys = ', '.join(data.keys())
        # One %s placeholder per column; values are bound by the driver.
        placeholders = ', '.join(['%s'] * len(data))
        sql = f"INSERT INTO machine_tool_site ({keys}) VALUES ({placeholders})"
        cursor.execute(sql, list(data.values()))
        conn.commit()

        logger.info(f"成功插入数据: {data['product_name']}")

    except mysql.connector.Error as err:
        logger.error(f"插入数据失败: {err}")


def update_item(conn, cursor, data: dict):
    """Update the ``machine_tool_site`` row identified by ``data['pmid']``.

    Every key in ``data`` (including 'pmid' itself) goes into the SET
    clause; 'pmid' is additionally bound to the WHERE clause.

    Args:
        conn: open MySQL connection (committed on success).
        cursor: cursor created from ``conn``.
        data: columns to update; must contain 'pmid'.
    """
    try:
        set_clause = ', '.join([f"{key} = %s" for key in data.keys()])
        sql = f"UPDATE machine_tool_site SET {set_clause} WHERE pmid = %s"
        values = list(data.values()) + [data['pmid']]
        cursor.execute(sql, values)
        conn.commit()
        logger.info(f'更新成功{data["pmid"]}')

    except mysql.connector.Error as err:
        # Fixed log text: this is an UPDATE failure, not an insert failure.
        logger.error(f"更新数据失败: {err}")

def get_tag_content(soup):
    """Return the visible text of an HTML fragment with whitespace collapsed.

    Args:
        soup: either a raw HTML string or an already-parsed bs4 ``Tag``.
    """
    node = soup if isinstance(soup, Tag) else BeautifulSoup(soup, 'html.parser')
    # get_text() strips markup; split/join squeezes runs of whitespace.
    return ' '.join(node.get_text().split())


def get_md5(content: str):
    """Return the hex MD5 digest of *content* (UTF-8 encoded).

    Args:
        content (str): text to hash.
    """
    digest = hashlib.md5()
    digest.update(content.encode('utf-8'))
    return digest.hexdigest()


def select_item(conn, cursor, item: dict):
    """Count shareholder rows matching ``item['pmid']``.

    Security fix: the original interpolated pmid straight into the SQL
    string (injection-prone); this version binds it as a parameter.

    Returns:
        The first result row, e.g. ``(count,)``.
    """
    sql = ("select count(pmid) as mid_count from "
           "listing_enterprise_circulation_shareholder where pmid = %s")
    cursor.execute(sql, (item['pmid'],))
    return cursor.fetchone()


def get_sql_data(conn, cursor):
    """Fetch all (pmid, url) rows that still need a corrected application area.

    Selects rows whose ``application_area`` is populated but whose
    ``application_area_amend`` has not been filled in yet.
    """
    query = (
        "SELECT pmid,url FROM `machine_tool_site` "
        "WHERE application_area IS NOT NULL AND application_area != '' "
        "AND (application_area_amend IS NULL OR application_area_amend = '')"
    )
    cursor.execute(query)
    return cursor.fetchall()


class Jc35Data:
    """Scraper for jc35.com product-detail pages.

    Fetches each product URL, extracts the origin / application-area
    fields from the page and writes the corrected application area back
    to MySQL via ``update_item``.
    """

    def __init__(self, conn, cursor):
        # Default request headers; user-agent randomised per instance.
        self.headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "accept-language": "zh-CN,zh;q=0.9",
            "cache-control": "max-age=0",
            "if-modified-since": "Tue, 08 Jul 2025 07:53:56 GMT",
            "if-none-match": "W/\"7264fc74ddefdb1:0\"",
            "priority": "u=0, i",
            "sec-ch-ua": "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"138\", \"Microsoft Edge\";v=\"138\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "none",
            "sec-fetch-user": "?1",
            "upgrade-insecure-requests": "1",
            "user-agent": lua.random
        }
        self.session = requests.Session()
        self.conn = conn
        self.cursor = cursor
        # Cookie jar, refreshed via get_cookie() whenever the site rejects us.
        self.cookies = {}

    def requests_start(self, url, **kwargs):
        """Request *url* with up to 50 retries.

        kwargs: method ('post' for POST, else GET), params, data, headers.

        Returns:
            The response on HTTP 200, or on a 404 that carries the site's
            "page deleted" notice (a valid terminal answer). Returns
            ``False`` when every attempt failed.
        """
        response = False
        max_count = 51
        for i in range(1, max_count):
            time.sleep(random.randint(1, 3))
            try:
                logger.info(f'尝试第{i}次请求：{url}')
                method = kwargs.get('method', '')
                if method.lower() != 'post':
                    params = kwargs.get('params', '')
                    if params:
                        response = requests.get(url, headers=self.headers, params=params, timeout=10,
                                                impersonate="chrome", cookies=self.cookies)
                    else:
                        response = requests.get(url, headers=kwargs.get('headers', self.headers), timeout=10,
                                                impersonate="chrome", cookies=self.cookies)
                else:
                    response = requests.post(url, headers=kwargs.get('headers', ''), data=kwargs.get('data', ''),
                                             timeout=10, impersonate="chrome", cookies=self.cookies)
                if response.status_code == 200:
                    return response
                # A 404 carrying the "page deleted" notice is final — no retry.
                if response.status_code == 404 and ': 抱歉, 您所查找的页面不存在, 可能已被删除或您输错了网址!' in response.text:
                    return response
                # Any other status: back off, refresh cookies and retry.
                time.sleep(random.randint(1, 3))
                self.cookies = get_cookie({})
                continue
            except Exception:
                # Network/timeout error: back off, rotate proxy and retry.
                time.sleep(random.randint(1, 3))
                get_random_proxy()
                continue

        return response

    def parse_details(self, url, meta):
        """Fetch one product page and persist its corrected application area.

        Args:
            url: product-detail page URL.
            meta: row dict (must contain 'pmid'); mutated in place with
                  'application_area_amend' before the DB update.

        Raises:
            Exception: when the page could not be fetched at all (the URL
            is also appended to parse_details.txt for later inspection).
        """
        headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
            "cache-control": "max-age=0",
            "priority": "u=0, i",
            "sec-ch-ua": "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"138\", \"Microsoft Edge\";v=\"138\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "none",
            "sec-fetch-user": "?1",
            "upgrade-insecure-requests": "1",
            "user-agent": lua.random
        }
        response_details = self.requests_start(url, headers=headers)
        # BUG FIX: the failed-request check must come BEFORE any .text access;
        # requests_start returns False on total failure, and the original code
        # touched response_details.text first, raising AttributeError instead
        # of recording the dead URL.
        if not response_details:
            with open('parse_details.txt', 'a', encoding='utf-8') as f:
                f.write(f'{str(url)},\n')
            raise Exception(f'无法访问！！！{url}')
        if ': 抱歉, 您所查找的页面不存在, 可能已被删除或您输错了网址!' in response_details.text:
            # Page deleted on the site — nothing to update.
            return

        root = Selector(text=response_details.text)
        brand_origin = ''
        application_area = ''

        # Spec table: a <th> containing 产地 (origin) is followed by the
        # value in its next sibling <td>. Collected for potential future use.
        lis_1 = root.xpath('//tr[@class="trBg"]/parent::*/tr')
        for li_1 in lis_1:
            for th_cell in li_1.xpath('./th'):
                text = th_cell.xpath('./text()').get('').strip().replace('\xa0', '')
                if '产地' in text:
                    brand_origin = th_cell.xpath('following-sibling::td[1]/text()').get('').strip()

        try:
            application_area = get_application(response_details.text)
        except Exception as e:
            logger.error(f'获取应用领域失败：{e}')

        if application_area:
            meta['application_area_amend'] = application_area
            update_item(self.conn, self.cursor, meta)




# Shared work queue of (pmid, url) tuples, drained by the worker threads.
task_queue = queue.Queue()

def worker_thread(name):
    """Drain the shared task_queue, scraping each (pmid, url) task.

    Each thread owns its own MySQL connection/cursor and Jc35Data
    instance. Failed tasks are pushed to Redis for later replay.

    Args:
        name: thread label used in log messages.
    """
    conn_sql = get_db_connection()
    cursor_sql = conn_sql.cursor()
    spider = Jc35Data(conn_sql, cursor_sql)

    try:
        while True:
            # Canonical non-blocking drain: get_nowait + Empty avoids the
            # empty()/get race the original empty()-guarded loop had.
            try:
                pmid, url = task_queue.get_nowait()
            except queue.Empty:
                break

            item = {'pmid': pmid, 'url': url}
            try:
                spider.parse_details(url, item)
            except Exception as e:
                logger.error(f"[{name}] 请求错误：{e}")
                # Stash the failed task in Redis so it can be retried later.
                conn_redis.sadd('get_jc35_data:pages', json.dumps(item, ensure_ascii=False))
    finally:
        # Always release the DB resources, even on an unexpected escape.
        cursor_sql.close()
        conn_sql.close()

    logger.info(f"[{name}] 线程退出")


def start_batched_spider(batch_size=5):
    """Process every pending row in batches of *batch_size* worker threads.

    Loads all (pmid, url) tasks once, then for each batch fills the
    shared task_queue, spawns worker threads and joins them before the
    next batch starts.

    Args:
        batch_size: tasks per batch, and the upper bound on threads.
    """
    # Fetch the full task list up front with a short-lived connection.
    conn_sql = get_db_connection()
    cursor_sql = conn_sql.cursor()
    try:
        data_list = get_sql_data(conn_sql, cursor_sql)
    finally:
        cursor_sql.close()
        conn_sql.close()

    # Split the work into batches.
    total = len(data_list)
    num_batches = math.ceil(total / batch_size)

    logger.info(f"共需处理 {total} 条任务，分为 {num_batches} 批，每批 {batch_size} 个线程")

    for batch_index in range(num_batches):
        start = batch_index * batch_size
        end = min((batch_index + 1) * batch_size, total)
        batch_data = data_list[start:end]

        # Drain any leftovers, then load the current batch.
        while not task_queue.empty():
            task_queue.get()

        for data in batch_data:
            task_queue.put(data)

        logger.info(f"启动第 {batch_index + 1} 批线程，共 {len(batch_data)} 条任务")

        # Fix: never spawn more threads than tasks — the final batch can be
        # short, and idle threads each opened a pointless DB connection.
        threads = []
        for i in range(min(batch_size, len(batch_data))):
            name = f"Batch-{batch_index + 1}-Thread-{i + 1}"
            t = threading.Thread(target=worker_thread, args=(name,))
            t.start()
            threads.append(t)

        for t in threads:
            t.join()

        logger.info(f"第 {batch_index + 1} 批线程执行完毕")


if __name__ == "__main__":
    # Entry point: process every pending row, 10 worker threads per batch.
    start_batched_spider(batch_size=10)