import json
import math
import random
import re
import time
from datetime import datetime

import copy
import hashlib
from loguru import logger
from scrapy import Selector
from bs4 import BeautifulSoup, NavigableString, Tag, Comment
# import requests
import mysql.connector
from requests.utils import cookiejar_from_dict
from datetime import datetime, timedelta
import json
import re
from getcookie import get_cookie
from redis import StrictRedis
from loguru import logger
from curl_cffi import requests
from fake_useragent import UserAgent
from get_proxy import get_random_proxy


# Shared fake-UA generator; every request picks a random user-agent from it.
lua = UserAgent()
# FIX: the f-prefix had no placeholders and was dropped.
# NOTE(review): credentials are hard-coded in the URL — move to config/env.
REDIS_URL = 'redis://r-2zer2v9lrdl75a694s:tianmai_2018@r-2zer2v9lrdl75a694spd.redis.rds.aliyuncs.com:6379/14'
# NOTE(review): conn_redis is never used elsewhere in this file — confirm before removing.
conn_redis = StrictRedis.from_url(REDIS_URL, encoding='utf-8', decode_responses=True)


# MySQL connection settings (local development).
# NOTE(review): credentials are hard-coded — prefer environment variables.
# FIX: removed a commented-out alternate config that carried live
# production credentials; secrets must not live in source, even commented.
db_config = {
    'host': '127.0.0.1',  # MySQL server address
    'user': 'root',  # MySQL username
    'password': '1234',  # MySQL password
    'database': 'tender',  # MySQL database name
}


def get_db_connection():
    """Open and return a fresh MySQL connection built from the module-level db_config."""
    connection = mysql.connector.connect(**db_config)
    return connection


# Insert one row into MySQL
def insert_data_to_mysql(data, conn, cursor):
    """Insert *data* as one row into the ``test_jichuangwang`` table.

    Args:
        data (dict): column-name -> value mapping; values are passed as
            query parameters (never interpolated into the SQL string).
        conn: open MySQL connection (committed on success).
        cursor: cursor created from *conn*.

    Errors from the driver are logged and swallowed so one bad row does
    not abort the crawl.
    """
    try:
        keys = ', '.join(data.keys())
        # FIX: placeholders were built with a pointless f-string per key.
        set_clause = ', '.join(['%s'] * len(data))
        # NOTE: column names come from our own scraper dict, not user input;
        # values go through driver-side parameter binding.
        sql = f"INSERT INTO test_jichuangwang ({keys}) VALUES ({set_clause})"
        values = list(data.values())
        cursor.execute(sql, values)
        conn.commit()

        # FIX: use .get() — a missing 'product_name' key previously raised
        # KeyError after the commit, uncaught by the mysql handler below.
        logger.info(f"成功插入数据: {data.get('product_name', '')}")

    except mysql.connector.Error as err:
        logger.error(f"插入数据失败: {err}")


def get_tag_content(soup):
    """Return the whitespace-normalized plain text of an HTML fragment.

    Accepts either a raw HTML string or an already-parsed bs4 Tag.
    """
    if not isinstance(soup, Tag):
        soup = BeautifulSoup(soup, 'html.parser')
    # Extract the raw text, then collapse all runs of whitespace to one space.
    raw_text = soup.get_text()
    return ' '.join(raw_text.split())


def get_md5(content: str) -> str:
    """Return the hex MD5 digest of *content*.

    Args:
        content (str): text to hash; encoded as UTF-8 before digesting.
    """
    digest = hashlib.md5(content.encode('utf-8'))
    return digest.hexdigest()


def select_item(conn, cursor, item: dict):
    """Count existing rows with this item's pmid.

    Args:
        conn: open MySQL connection (unused here; kept for interface parity).
        cursor: cursor to execute the query on.
        item (dict): must contain a 'pmid' key.

    Returns:
        The first fetched row, e.g. a 1-tuple with the count.
    """
    # FIX: pmid was interpolated into the SQL string (injection-prone and
    # breaks on embedded quotes); use driver-side parameter binding instead.
    sql = "select count(pmid) as mid_count from listing_enterprise_circulation_shareholder where pmid = %s"
    cursor.execute(sql, (item['pmid'],))
    result = cursor.fetchone()
    return result



class Jc35Data:
    """Crawler for www.jc35.com (machine-tool industry portal).

    Walks the home-page category tree, pages through each category's product
    listing (POST pagination, capped at 100 pages), scrapes each product's
    detail page plus its company profile, and inserts one row per product
    into MySQL through the connection/cursor supplied to ``__init__``.
    """

    def __init__(self, conn, cursor):
        # Default browser-like headers for the landing page; the other
        # steps pass their own header dicts to requests_start().
        self.headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "accept-language": "zh-CN,zh;q=0.9",
            "cache-control": "max-age=0",
            "if-modified-since": "Tue, 08 Jul 2025 07:53:56 GMT",
            "if-none-match": "W/\"7264fc74ddefdb1:0\"",
            "priority": "u=0, i",
            "sec-ch-ua": "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"138\", \"Microsoft Edge\";v=\"138\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "none",
            "sec-fetch-user": "?1",
            "upgrade-insecure-requests": "1",
            "user-agent": lua.random
        }
        self.session = requests.Session()
        self.conn = conn
        self.cursor = cursor
        # Refreshed via get_cookie() whenever the site answers HTTP 468.
        self.cookies = {}

    def requests_start(self, url, **kwargs):
        """Fetch *url* with up to 20 attempts, rotating proxies between tries.

        Keyword args: ``method`` ('post' for POST), ``params``, ``data``,
        ``headers``. HTTP 468 is treated as an anti-bot challenge: rotate
        the proxy AND refresh cookies before retrying.

        Returns:
            The response on HTTP 200; otherwise the last response object
            (or False if no request ever completed) after max retries.
        """
        response = False
        max_count = 21  # up to 20 attempts
        proxy = get_random_proxy()
        for i in range(1, max_count):
            time.sleep(random.randint(2, 5))  # polite random delay between tries
            try:
                logger.info(f'尝试第{i}次请求：{url}，使用代理：{proxy}')
                method = kwargs.get('method', '')
                if method.lower() != 'post':
                    params = kwargs.get('params', '')
                    if params:
                        response = requests.get(url, headers=self.headers, params=params, timeout=10, impersonate="chrome", proxies=proxy, cookies=self.cookies)
                    else:
                        response = requests.get(url, headers=kwargs.get('headers', self.headers), timeout=10, impersonate="chrome", proxies=proxy, cookies=self.cookies)
                else:
                    response = requests.post(url, headers=kwargs.get('headers', ''), data=kwargs.get('data', ''), timeout=10, impersonate="chrome", proxies=proxy)
                if response.status_code == 200:
                    return response
                if response.status_code == 468:
                    # Anti-bot status: new proxy plus fresh cookies.
                    proxy = get_random_proxy()
                    self.cookies = get_cookie(proxy)
                else:
                    proxy = get_random_proxy()
            # FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt.
            except Exception:
                proxy = get_random_proxy()

        return response

    def start_requests(self):
        """Entry point: crawl every start URL's category tree."""
        urls = [
            "https://www.jc35.com/"
        ]
        for url in urls:
            self.parse_list(url)

    def parse_list(self, url):
        """Parse the home page's three-level category tree and start each category."""
        response_ = self.requests_start(url)
        if not response_:
            with open('error_save_parse_list.txt', 'a', encoding='utf-8') as f:
                f.write(f'{str(url)},\n')
                logger.error(f'无法访问！！！{url}')
        else:
            root = Selector(text=response_.text)
            lis = root.xpath('//li[@class="brandli"]')
            for li in lis:
                first_level_product_name = li.xpath('./div[@class="class-a"]/div[@class="name"]/a/text()').get()
                # FIX: '//div[...]' was absolute and searched the whole document,
                # pairing every first-level category with every item on the page;
                # './/' restricts the search to the current <li>.
                two_lis = li.xpath('.//div[@class="pro-type"]/div[@class="item"]')
                for two_li in two_lis:
                    two_level_product_name = two_li.xpath('./div[@class="label"]/a/text()').get()
                    three_lis = two_li.xpath('./div[@class="text-list"]//a')
                    for three_li in three_lis:
                        three_level_product_name = three_li.xpath('./text()').get()
                        three_url = three_li.xpath('./@href').get()

                        url_next = str(three_url).replace('.html', '') + "_p1.html"
                        # FIX: original indexed [0] inside the condition as well,
                        # raising IndexError whenever the pattern did not match.
                        id_matches = re.findall(r'chanpin-(\d+)', url_next)
                        id = id_matches[0] if id_matches else ''
                        # POST body for the paginated listing endpoint; T is the
                        # category id, P the page number.
                        data = {
                            "T": f"{id}",
                            "P": "1",
                            "PID": "0",
                            "CID": "0",
                            "TID": "3",
                            "Sort": "1",
                            "FldSort": "0",
                            "PriceStart": "0",
                            "PriceEnd": "0",
                            "PBID": "0",
                            "K": "",
                            "JustPC": "1",
                            "PP": "0"
                        }
                        type_dict = {
                            "first_level_product_name": first_level_product_name,
                            "two_level_product_name": two_level_product_name,
                            "three_level_product_name": three_level_product_name,
                            "url_next": url_next,
                            "post_data": data,
                            "id": id,
                            "page": 1
                        }

                        self.parse_list_date(url_next, type_dict)

    def parse_list_date(self, url, meta):
        """Scrape one listing page, then recurse to the next page (max 100)."""
        type_dict_ = copy.deepcopy(meta)
        headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
            "cache-control": "max-age=0",
            "content-type": "application/x-www-form-urlencoded",
            "origin": "https://www.jc35.com",
            "priority": "u=0, i",
            "referer": "https://www.jc35.com/chanpin-4202.html",
            "sec-ch-ua": "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"138\", \"Microsoft Edge\";v=\"138\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "same-origin",
            "sec-fetch-user": "?1",
            "upgrade-insecure-requests": "1",
            "user-agent": lua.random
        }
        response_date = self.requests_start(url, data=meta['post_data'], method='post', headers=headers)
        if not response_date:
            with open('parse_list_date.txt', 'a', encoding='utf-8') as f:
                f.write(f'{str(url)},\n')
                logger.error(f'无法访问！！！{url}')
            # FIX: original fell through and crashed on response_date.text below.
            return
        root = Selector(text=response_date.text)
        lis = root.xpath('//div[@class="pro-list productsLists"]/ul/li')
        for li in lis:
            product_name = li.xpath('./div[@class="item"]/p/text()').get('').strip() or li.xpath('./div[@class="item"]/p/a/text()').get('').strip()
            company_name = li.xpath('./div[@class="item"]/div[@class="company"]/a/text()').get('').strip()
            member_level = li.xpath('./div[@class="item"]/div[@class="level"]/b/@class').get('').strip()
            member_year = ''.join(li.xpath('./div[@class="item"]/div[@class="year"]//text()').getall()).strip()
            details_url = li.xpath('./div[@class="item"]/a[@class="proLink"]/@href').get('').strip()
            # Each product gets its own copy of the category metadata.
            type_dict_next = copy.deepcopy(meta)
            type_dict_next['product_name'] = product_name
            type_dict_next['company_name'] = company_name
            type_dict_next['member_level'] = member_level
            type_dict_next['member_year'] = member_year
            type_dict_next['details_url'] = details_url
            self.parse_details(details_url, type_dict_next)

        count_matches = re.findall(r'页(\d+)条记录', response_date.text)
        count = count_matches[0] if count_matches else ''
        if not count:
            # FIX: original did int('') here and raised ValueError when the
            # record-count marker was absent.
            return
        total = math.ceil(int(count) / 35)  # 35 products per listing page
        page_total = 100 if total > 100 else total  # site caps pagination at 100
        if type_dict_['page'] <= page_total:
            type_dict_['page'] += 1
            url_next_ = f'https://www.jc35.com/chanpin-{type_dict_["id"]}_p{type_dict_["page"]}.html'
            type_dict_['post_data']['P'] = f"{type_dict_['page']}"
            self.parse_list_date(url_next_, type_dict_)

    def parse_details(self, url, meta):
        """Scrape one product detail page, then follow the company-profile link."""
        headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
            "cache-control": "max-age=0",
            "priority": "u=0, i",
            "sec-ch-ua": "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"138\", \"Microsoft Edge\";v=\"138\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "none",
            "sec-fetch-user": "?1",
            "upgrade-insecure-requests": "1",
            "user-agent": lua.random
        }
        response_details = self.requests_start(url, headers=headers)
        if not response_details:
            with open('parse_details.txt', 'a', encoding='utf-8') as f:
                f.write(f'{str(url)},\n')
                logger.error(f'无法访问！！！{url}')
        else:
            root = Selector(text=response_details.text)
            lis = root.xpath('//div[@class="model"]/p')
            brand_name = ''
            brand_location = ''
            brand_origin = ''
            application_area = ''
            # "品牌" (brand) / "所在地" (location) label-value pairs.
            for li in lis:
                type_name = ''.join(li.xpath('./span/text()').getall()).strip().replace('\xa0', '')
                if '品牌' in type_name:
                    brand_name = li.xpath('./text()').get('').strip()
                if '所在地' in type_name:
                    brand_location = li.xpath('./text()').get('').strip()

            # Spec table: the <td> at the same column index as the "产地"
            # (place of origin) <th> holds its value.
            lis_1 = root.xpath('//tr[@class="trBg"]')
            for li_1 in lis_1:
                # FIX: original indexed into lis_1.xpath('./td') — the cells of
                # ALL rows concatenated — so any row after the first read the
                # wrong cell; use the current row's cells instead.
                cells = li_1.xpath('./td')
                for index_now, th in enumerate(li_1.xpath('./th')):
                    text = th.xpath('./text()').get('').strip().replace('\xa0', '')
                    if '产地' in text and index_now < len(cells):
                        brand_origin = cells[index_now].xpath('./text()').get('').strip()

            # "应用领域/应用范围" (application area): take the paragraph that
            # follows the heading.
            content_lis = root.xpath('//p | //h1')
            for content_li in content_lis:
                content_str = content_li.xpath('.//text()').get(default='').strip().replace(' ', '')
                if '应用领域' in content_str or '应用范围' in content_str:
                    next_tag = content_li.xpath('following-sibling::p[1]')
                    if next_tag:
                        content_text = next_tag.xpath('.//text()').getall()
                        application_area = ''.join([t.strip() for t in content_text])

            save_item = {}
            save_item['first_level_product_name'] = meta['first_level_product_name']
            save_item['two_level_product_name'] = meta['two_level_product_name']
            save_item['three_level_product_name'] = meta['three_level_product_name']
            save_item['product_name'] = meta['product_name']
            save_item['company_name'] = meta['company_name']
            save_item['member_level'] = meta['member_level']
            save_item['member_year'] = meta['member_year']
            save_item['brand_name'] = brand_name
            save_item['brand_location'] = brand_location
            save_item['brand_origin'] = brand_origin
            save_item['application_area'] = application_area
            save_item['url'] = url

            company_url = root.xpath('//a[contains(text(),"公司档案")]/@href').get('')
            if company_url:
                if 'https' not in company_url:
                    company_url = 'https://www.jc35.com' + company_url
                self.company_details(company_url, save_item)
            else:
                with open('error_company_url_find.txt', 'a', encoding='utf-8') as f:
                    f.write(f'{str(url)},\n')
                    logger.error(f'无法访问！！！{url}')

    def company_details(self, url, save_item):
        """Scrape the company profile, finalize the item, and insert it into MySQL."""
        headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "accept-language": "zh-CN,zh;q=0.9",
            "priority": "u=0, i",
            "sec-ch-ua": "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"138\", \"Microsoft Edge\";v=\"138\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "none",
            "sec-fetch-user": "?1",
            "upgrade-insecure-requests": "1",
            "user-agent": lua.random
        }
        response_company = self.requests_start(url, headers=headers)
        if not response_company:
            with open('company_details.txt', 'a', encoding='utf-8') as f:
                f.write(f'{str(url)},\n')
                logger.error(f'无法访问！！！{url}')
        else:
            root = Selector(text=response_company.text)
            content_text = root.xpath('//div[@class="com_profile"]/div[@class="container"]').get()
            try:
                company_description = get_tag_content(soup=content_text)
            # FIX: was a bare `except:`; keep the best-effort fallback but only
            # for ordinary exceptions (e.g. profile div missing -> None input).
            except Exception:
                company_description = ''
                with open('error_company_company_description_find.txt', 'a', encoding='utf-8') as f:
                    f.write(f'{str(url)},\n')
                    logger.error(f'无法访问！！！{url}')
            save_item['company_description'] = company_description

            # NOTE(review): company_name appears twice in the hash input and
            # company_description is excluded — looks unintended, but changing
            # it would alter every existing pmid; confirm before touching.
            save_item['pmid'] = get_md5(f"{save_item['first_level_product_name']}{save_item['two_level_product_name']}{save_item['three_level_product_name']}{save_item['product_name']}{save_item['company_name']}{save_item['company_name']}{save_item['url']}")
            now = datetime.now()
            save_item['created'] = now.strftime('%Y-%m-%d %H:%M:%S')

            insert_data_to_mysql(save_item, self.conn, self.cursor)



if __name__ == "__main__":
    conn_sql = get_db_connection()
    cursor_sql = conn_sql.cursor()
    try:
        spider = Jc35Data(conn_sql, cursor_sql)
        spider.start_requests()
    finally:
        # FIX: the cursor and connection were never closed.
        cursor_sql.close()
        conn_sql.close()