# from env_check import install_requirements
# install_requirements()

import json
import math
import random
import re
import time
from datetime import datetime
import threading
import copy
import hashlib
from loguru import logger
from scrapy import Selector
from bs4 import BeautifulSoup, NavigableString, Tag, Comment
# import requests
import mysql.connector
from requests.utils import cookiejar_from_dict
from datetime import datetime, timedelta
import json
import re
from getcookie import get_cookie
from redis import StrictRedis
from loguru import logger
from curl_cffi import requests
from fake_useragent import UserAgent
from get_proxy import get_random_proxy


# Shared module-level resources: random user-agent source and Redis connection
# (the Redis set is used as a retry queue for failed pages).
lua = UserAgent()
# NOTE(review): credentials are hard-coded in source — move to env vars/secrets.
# Also, this f-string has no placeholders; a plain string would do.
REDIS_URL = f'redis://r-2zer2v9lrdl75a694s:tianmai_2018@r-2zer2v9lrdl75a694spd.redis.rds.aliyuncs.com:6379/14'
conn_redis = StrictRedis.from_url(REDIS_URL, encoding='utf-8', decode_responses=True)


# Database connection settings (local/dev variant, kept for reference)
# db_config = {
#     'host': '127.0.0.1',  # MySQL server address
#     'user': 'root',  # MySQL username
#     'password': '1234',  # MySQL password
#     'database': 'tender',  # MySQL database name
# }

# Production MySQL settings.
# NOTE(review): hard-coded credentials — move to env vars/secrets.
db_config = {
    'host': 'pc-2ze9oh2diu5e5firh.rwlb.rds.aliyuncs.com',  # MySQL server address
    'port': 3306,
    'user': 'data_collection',  # MySQL username
    'password': 'CRNabzFQ2H',  # MySQL password
    'database': 'internal_collection',  # MySQL database name
}


# Open a MySQL connection
def get_db_connection():
    """Open and return a new MySQL connection using the module-level db_config."""
    connection = mysql.connector.connect(**db_config)
    return connection


# Insert one record into MySQL
def insert_data_to_mysql(data, conn, cursor):
    """Insert a single row into machine_tool_site.

    Args:
        data: mapping of column name -> value; must contain 'product_name'
            (used in the success log message).
        conn: open MySQL connection (committed on success).
        cursor: cursor belonging to *conn*.

    MySQL errors are logged and swallowed so one bad row does not abort
    the whole crawl.
    """
    try:
        columns = ', '.join(data.keys())
        # One %s placeholder per column; values are passed separately so the
        # driver escapes them (no string interpolation into the statement).
        placeholders = ', '.join(['%s'] * len(data))
        sql = f"INSERT INTO machine_tool_site ({columns}) VALUES ({placeholders})"
        cursor.execute(sql, list(data.values()))
        conn.commit()

        logger.info(f"成功插入数据: {data['product_name']}")

    except mysql.connector.Error as err:
        logger.error(f"插入数据失败: {err}")


def update_item(conn, cursor, data: dict):
    """Update the machine_tool_site row identified by data['pmid'].

    Every key in *data* becomes a SET clause (including pmid itself, which is
    harmlessly re-assigned); the pmid is appended once more for the WHERE
    placeholder. MySQL errors are logged and swallowed.
    """
    try:
        set_clause = ', '.join([f"{key} = %s" for key in data.keys()])
        sql = f"UPDATE machine_tool_site SET {set_clause} WHERE pmid = %s"
        values = list(data.values()) + [data['pmid']]
        cursor.execute(sql, values)
        conn.commit()
        logger.info(f'更新成功{data["pmid"]}')

    except mysql.connector.Error as err:
        # BUGFIX: original logged "插入数据失败" (insert failed) for an UPDATE.
        logger.error(f"更新数据失败: {err}")

def get_tag_content(soup):
    """Return the visible text of *soup* with all whitespace collapsed.

    Accepts either raw HTML (str/bytes) or an already-parsed bs4 Tag.
    """
    if not isinstance(soup, Tag):
        soup = BeautifulSoup(soup, 'html.parser')
    # Extract the plain text, then collapse runs of whitespace to single spaces.
    raw_text = soup.get_text()
    return ' '.join(raw_text.split())


def get_md5(content: str) -> str:
    """Return the hex MD5 digest of *content* (UTF-8 encoded).

    Args:
        content (str): text to hash.
    """
    digest = hashlib.md5()
    digest.update(content.encode('utf-8'))
    return digest.hexdigest()


def select_item(conn, cursor, item: dict):
    """Return the count row for records whose pmid matches item['pmid'].

    BUGFIX: the original interpolated item['pmid'] directly into the SQL
    string (SQL injection risk); now uses a parameterised query.
    NOTE(review): queries listing_enterprise_circulation_shareholder, not
    machine_tool_site like the rest of this module — confirm this is intended.
    """
    sql = "select count(pmid) as mid_count from listing_enterprise_circulation_shareholder where pmid = %s"
    cursor.execute(sql, (item['pmid'],))
    result = cursor.fetchone()
    return result


def get_sql_data(conn, cursor):
    """Fetch (pmid, url) pairs whose brand_origin is set but not yet
    normalised to '国产' (domestic) or '进口' (imported)."""
    query = (
        "SELECT pmid,url FROM machine_tool_site "
        "WHERE brand_origin != '国产' AND brand_origin != '进口' "
        "AND brand_origin != '' AND brand_origin IS NOT NULL"
    )
    cursor.execute(query)
    return cursor.fetchall()


class Jc35Data:
    """Scraper for jc35.com product detail pages.

    Re-crawls machine_tool_site rows whose brand_origin has not yet been
    normalised, extracts the '产地' (origin) value from the product page and
    updates the row through the shared MySQL connection/cursor.
    """

    def __init__(self, conn, cursor):
        """Store the shared MySQL connection/cursor and the default headers."""
        self.headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "accept-language": "zh-CN,zh;q=0.9",
            "cache-control": "max-age=0",
            "if-modified-since": "Tue, 08 Jul 2025 07:53:56 GMT",
            "if-none-match": "W/\"7264fc74ddefdb1:0\"",
            "priority": "u=0, i",
            "sec-ch-ua": "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"138\", \"Microsoft Edge\";v=\"138\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "none",
            "sec-fetch-user": "?1",
            "upgrade-insecure-requests": "1",
            "user-agent": lua.random
        }
        # NOTE(review): session is created but unused — module-level
        # requests.get/post are called instead. Kept for interface stability.
        self.session = requests.Session()
        self.conn = conn
        self.cursor = cursor
        # Refreshed via get_cookie() whenever a request is blocked.
        self.cookies = {}

    def requests_start(self, url, **kwargs):
        """Fetch *url* with up to 50 retries, impersonating Chrome.

        Keyword args:
            method: 'post' for a POST request, anything else is GET.
            params: optional query parameters (GET only).
            headers: optional per-request headers (defaults to self.headers).
            data: POST body.

        Returns:
            The curl_cffi response on HTTP 200 (or on the site's recognisable
            "page deleted" 404), otherwise the last response — or False when
            every attempt raised. Callers MUST handle a falsy return.
        """
        response = False
        max_count = 51
        for attempt in range(1, max_count):
            try:
                logger.info(f'尝试第{attempt}次请求：{url}')
                method = kwargs.get('method', '')
                if method.lower() != 'post':
                    params = kwargs.get('params', '')
                    if params:
                        response = requests.get(url, headers=self.headers, params=params, timeout=10,
                                                impersonate="chrome", cookies=self.cookies)
                    else:
                        response = requests.get(url, headers=kwargs.get('headers', self.headers), timeout=10,
                                                impersonate="chrome", cookies=self.cookies)
                else:
                    # BUGFIX: headers defaulted to '' here; use self.headers.
                    response = requests.post(url, headers=kwargs.get('headers', self.headers),
                                             data=kwargs.get('data', ''),
                                             timeout=10, impersonate="chrome", cookies=self.cookies)
                if response.status_code == 200:
                    return response
                if response.status_code == 404 and ': 抱歉, 您所查找的页面不存在, 可能已被删除或您输错了网址!' in response.text:
                    # Genuine "page deleted" 404 — return it so the caller can
                    # record a placeholder row instead of retrying forever.
                    return response
                # Blocked or transient failure: refresh anti-bot cookies and retry.
                self.cookies = get_cookie({})
            except Exception as exc:
                # Network/timeout errors: log and retry (was a silent bare except).
                logger.warning(f'请求异常({attempt}/{max_count - 1}): {exc}')
                continue

        return response

    def parse_details(self, url, meta):
        """Fetch one product detail page and update its brand_origin in MySQL.

        Args:
            url: product detail page URL.
            meta: row dict with at least 'pmid' and 'url'. The deleted-page
                branch also reads the full product fields (see NOTE below).

        Raises:
            Exception: when the page cannot be fetched after all retries; the
                URL is also appended to parse_details.txt for later replay.
        """
        headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
            "cache-control": "max-age=0",
            "priority": "u=0, i",
            "sec-ch-ua": "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"138\", \"Microsoft Edge\";v=\"138\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "none",
            "sec-fetch-user": "?1",
            "upgrade-insecure-requests": "1",
            "user-agent": lua.random
        }
        response_details = self.requests_start(url, headers=headers)
        # BUGFIX: check for total failure BEFORE touching response.text —
        # requests_start returns False when every retry raised, and the
        # original accessed response_details.text first, which raised
        # AttributeError instead of the intended logged exception.
        if not response_details:
            with open('parse_details.txt', 'a', encoding='utf-8') as f:
                f.write(f'{str(url)},\n')
            raise Exception(f'无法访问！！！{url}')

        if ': 抱歉, 您所查找的页面不存在, 可能已被删除或您输错了网址!' in response_details.text:
            # Page was deleted: insert a placeholder record with empty brand fields.
            # NOTE(review): this branch reads product fields from meta that the
            # current caller (turn_update) does not supply — it raises KeyError
            # there, which turn_update logs and queues for retry. Confirm intent.
            save_item = {
                'first_level_product_name': meta['first_level_product_name'],
                'two_level_product_name': meta['two_level_product_name'],
                'three_level_product_name': meta['three_level_product_name'],
                'product_name': meta['product_name'],
                'company_name': meta['company_name'],
                'member_level': meta['member_level'],
                'member_year': meta['member_year'],
                'brand_name': '',
                'brand_location': '',
                'brand_origin': '',
                'application_area': '',
                'url': url,
                'company_description': '',
            }
            # NOTE(review): company_name appears twice in the hash input — kept
            # as-is so pmids stay identical to existing rows.
            save_item['pmid'] = get_md5(
                f"{save_item['first_level_product_name']}{save_item['two_level_product_name']}{save_item['three_level_product_name']}{save_item['product_name']}{save_item['company_name']}{save_item['company_name']}{save_item['url']}")
            save_item['created'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

            insert_data_to_mysql(save_item, self.conn, self.cursor)
        else:
            root = Selector(text=response_details.text)
            # The spec list appears under one of several known page layouts.
            lis = root.xpath('//div[@class="model"]/p') or root.xpath('//div[contains(@class,"proDetail0")]/ul/li') or root.xpath('//div[@class="pro_right_bot"]/ul/li')
            # brand_name / brand_location are parsed but currently unused
            # (the stored-update path below only writes brand_origin).
            brand_name = ''
            brand_location = ''
            brand_origin = ''
            for li in lis:
                type_name = ''.join(li.xpath('./span/text()').getall()).strip().replace('\xa0', '')
                if '品牌' in type_name:
                    brand_name = li.xpath('./text()').get('').strip() or li.xpath('./b/text()').get('').strip() or li.xpath('./p/text()').get('').strip()
                if '所在地' in type_name:
                    brand_location = li.xpath('./text()').get('').strip() or li.xpath('./b/text()').get('').strip() or li.xpath('./p/text()').get('').strip()

            # Collect every '产地' (origin) value from the spec table: the <td>
            # immediately following a <th> whose text is exactly '产地'.
            brand_origin_lis = []
            for row in root.xpath('//tr[@class="trBg"]/parent::*/tr'):
                for th in row.xpath('./th'):
                    text = th.xpath('./text()').get('').strip().replace('\xa0', '')
                    if '产地' == text:
                        brand_origin_lis.append(th.xpath('following-sibling::td[1]/text()').get('').strip())
            # Prefer an exact '国产'/'进口' value; otherwise keep the last one seen.
            for candidate in brand_origin_lis:
                brand_origin = candidate
                if candidate == '国产' or candidate == '进口':
                    break

            if brand_origin:
                meta['brand_origin'] = brand_origin
                update_item(self.conn, self.cursor, meta)

    def company_details(self, url, save_item):
        """Fetch a company profile page, extract the description text, complete
        *save_item* (description, pmid, created) and insert it into MySQL.

        Raises:
            Exception: when the profile page cannot be fetched; the URL is
                appended to company_details.txt for later replay.
        """
        headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "accept-language": "zh-CN,zh;q=0.9",
            "priority": "u=0, i",
            "sec-ch-ua": "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"138\", \"Microsoft Edge\";v=\"138\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "none",
            "sec-fetch-user": "?1",
            "upgrade-insecure-requests": "1",
            "user-agent": lua.random
        }
        response_company = self.requests_start(url, headers=headers)
        if not response_company:
            with open('company_details.txt', 'a', encoding='utf-8') as f:
                f.write(f'{str(url)},\n')
            raise Exception(f'公司简介正文xpath定位不到！！！{url}')

        root = Selector(text=response_company.text)
        # The company profile body lives under one of several known containers.
        content_text = root.xpath(
            '//div[@class="com_profile"]/div[@class="container"]|//div[@class="aboutUsText"]|//div[@class="intro-content"]|//div[@class="companyIntro"]|//div[@class="introduce"]/div[@class="content"]|//div[@class="intro_box"]/div[@class="text"]|//div[@class="Introduction"]').get()
        try:
            company_description = get_tag_content(soup=content_text).replace('抱歉，由于流量限制，视频无法播放。', '')
        except Exception:
            # content_text is None when no container matched (was a bare except);
            # record the miss and fall back to an empty description.
            company_description = ''
            with open('error_company_company_description_find.txt', 'a', encoding='utf-8') as f:
                f.write(f'{str(url)}：{save_item["url"]},\n')
            logger.info(f'无法访问！！！{url}')
        save_item['company_description'] = company_description

        # NOTE(review): company_name appears twice in the hash input — kept
        # as-is so pmids stay identical to existing rows.
        save_item['pmid'] = get_md5(
            f"{save_item['first_level_product_name']}{save_item['two_level_product_name']}{save_item['three_level_product_name']}{save_item['product_name']}{save_item['company_name']}{save_item['company_name']}{save_item['url']}")
        save_item['created'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        insert_data_to_mysql(save_item, self.conn, self.cursor)



def turn_update():
    """Re-crawl every pending machine_tool_site row once and update brand_origin.

    Failed rows are logged and pushed onto the Redis set
    'get_jc35_data:pages' for later replay. The MySQL connection/cursor are
    always closed, even when the loop raises.
    """
    conn_sql = get_db_connection()
    cursor_sql = conn_sql.cursor()
    try:
        spider = Jc35Data(conn_sql, cursor_sql)
        data_list = get_sql_data(conn_sql, cursor_sql)
        for pmid, url in data_list:
            item = {
                'pmid': pmid,
                'url': url,
            }
            try:
                spider.parse_details(url, item)
            except Exception as e:
                logger.error(f'请求错误：{e}')
                # BUGFIX: the original passed the raw (pmid, url) tuple to
                # sadd, which redis-py rejects ("Invalid input of type
                # tuple"); serialise it as JSON so the member round-trips.
                conn_redis.sadd('get_jc35_data:pages', json.dumps([pmid, url], ensure_ascii=False))
    finally:
        # BUGFIX: cleanup now runs even if get_sql_data or the loop raises.
        cursor_sql.close()
        conn_sql.close()


def start_threaded_spider(thread_count=2):
    """Run turn_update() in *thread_count* parallel threads and wait for all
    of them to finish."""
    workers = [
        threading.Thread(target=turn_update, name=f"SpiderThread-{idx+1}")
        for idx in range(thread_count)
    ]
    for worker in workers:
        worker.start()
        logger.info(f"线程 {worker.name} 启动")

    for worker in workers:
        worker.join()
        logger.info(f"线程 {worker.name} 结束")

if __name__ == "__main__":
    # Multi-threaded variant, currently disabled:
    # start_threaded_spider(thread_count=5)
    turn_update()  # single-threaded run