import csv
import json
import math
import os
import random
import re
import time
import copy
import hashlib
from datetime import datetime

from loguru import logger
from scrapy import Selector
from bs4 import BeautifulSoup, Tag
import mysql.connector
from curl_cffi import requests
from fake_useragent import UserAgent
from redis import StrictRedis
from getcookie import get_cookie
from get_proxy import get_random_proxy

# Shared random User-Agent generator; each crawler instance samples one UA from it.
lua = UserAgent()

# Redis connection (db 14). NOTE(review): credentials are hardcoded in the URL —
# consider moving them to environment variables or a config file.
REDIS_URL = f'redis://r-2zer2v9lrdl75a694s:tianmai_2018@r-2zer2v9lrdl75a694spd.redis.rds.aliyuncs.com:6379/14'
conn_redis = StrictRedis.from_url(REDIS_URL, encoding='utf-8', decode_responses=True)

# MySQL connection settings for the internal_collection database.
# NOTE(review): credentials are hardcoded — consider env vars / secret storage.
db_config = {
    'host': 'pc-2ze9oh2diu5e5firh.rwlb.rds.aliyuncs.com',  # MySQL server address
    'port': 3306,
    'user': 'data_collection',  # MySQL user name
    'password': 'CRNabzFQ2H',  # MySQL password
    'database': 'internal_collection',  # MySQL database name
}

# Connect to the MySQL database
def get_db_connection():
    """Open and return a new MySQL connection using the module-level db_config."""
    return mysql.connector.connect(**db_config)

def get_tag_content(soup):
    """Return the whitespace-normalized plain text of an HTML fragment.

    Accepts either raw markup (parsed with html.parser) or an existing
    BeautifulSoup ``Tag``; collapses all runs of whitespace to single spaces.
    """
    if not isinstance(soup, Tag):
        soup = BeautifulSoup(soup, 'html.parser')
    return ' '.join(soup.get_text().split())

def get_md5(content: str):
    """Return the hexadecimal MD5 digest of *content* encoded as UTF-8."""
    digest = hashlib.md5(content.encode(encoding='utf-8'))
    return digest.hexdigest()

class Jc35Data:
    """Auditor for jc35.com product categories.

    Walks the site's three-level product classification tree, asks the
    listing endpoint for each leaf category's advertised item count,
    compares it with the number of rows already stored in MySQL, and
    appends any shortfall to ``error_type_count.csv`` for re-collection.
    """

    def __init__(self, conn_sql, cursor_sql):
        # One random desktop user-agent per crawler instance.
        self.headers = {
            "user-agent": lua.random
        }
        # NOTE(review): this session is created but requests_start issues
        # module-level curl_cffi calls; kept for interface compatibility.
        self.session = requests.Session()
        self.cookies = {}
        self.conn = conn_sql
        self.cursor = cursor_sql

    def requests_start(self, url, **kwargs):
        """Fetch ``url`` with retries; return the response on HTTP 200.

        Recognized kwargs: ``method`` ('post' selects POST, anything else
        GET), ``params`` (GET query parameters), ``data`` and ``headers``
        (POST payload / headers).  Status 468 is the site's anti-bot
        rejection, so a fresh cookie jar is fetched via ``get_cookie``
        before retrying.  Returns a falsy value once all attempts fail.
        """
        response = None
        max_count = 100  # up to 99 attempts, same budget as before
        for attempt in range(1, max_count):
            time.sleep(random.randint(1, 2))
            try:
                logger.info(f'尝试第{attempt}次请求：{url}')
                if kwargs.get('method', '').lower() == 'post':
                    response = requests.post(
                        url,
                        headers=kwargs.get('headers', self.headers),
                        data=kwargs.get('data', ''),
                        timeout=10,
                        impersonate="chrome",
                        cookies=self.cookies,
                    )
                elif kwargs.get('params', ''):
                    response = requests.get(
                        url,
                        headers=self.headers,
                        params=kwargs['params'],
                        timeout=10,
                        impersonate="chrome",
                        cookies=self.cookies,
                    )
                else:
                    response = requests.get(
                        url,
                        headers=self.headers,
                        timeout=10,
                        impersonate="chrome",
                        cookies=self.cookies,
                    )
                if response.status_code == 200:
                    return response
                if response.status_code == 468:
                    # Anti-bot challenge: back off and refresh the cookie jar.
                    time.sleep(random.randint(2, 3))
                    self.cookies = get_cookie({})
                else:
                    time.sleep(random.randint(2, 3))
            except Exception as exc:
                # Was a bare `except:`; keep the best-effort retry but log why.
                logger.warning(f'请求异常：{exc}')
                time.sleep(random.randint(2, 3))
        return response

    def select_count(self, cursor, first_level_product_name, two_level_product_name, three_level_product_name):
        """Return how many rows MySQL already holds for the category path.

        The query is parameterized: the original interpolated scraped
        category names straight into the SQL string, which is
        injection-prone and breaks on names containing quotes.
        """
        sql = (
            "SELECT COUNT(three_level_product_name) as product_count "
            "FROM machine_tool_site "
            "WHERE first_level_product_name = %s "
            "and two_level_product_name = %s "
            "and three_level_product_name = %s"
        )
        cursor.execute(sql, (first_level_product_name, two_level_product_name, three_level_product_name))
        count = cursor.fetchone()[0]
        return count

    def parse_list(self, url):
        """Crawl the category tree at ``url`` and audit every leaf category.

        Aborts the whole crawl (after logging the failed URL to a retry
        file) as soon as a request cannot be completed, matching the
        original control flow.
        """
        response_ = self.requests_start(url)
        if not response_:
            with open('error_save_parse_list.txt', 'a', encoding='utf-8') as f:
                f.write(f'{url},\n')
            logger.error(f'无法访问：{url}')
            return

        root = Selector(text=response_.text)
        for li in root.xpath('//div[@class="product-classify w260"]/ul/li'):
            first_level = li.xpath('./div[@class="class-a"]/p/a/text()').get()
            two_lis = li.xpath('./div[@class="class-b"]/div[@class="left-box"]/div[@class="item"]')
            for two_li in two_lis:
                two_level = two_li.xpath('./div[@class="title"]/p/a/text()').get()
                for three_li in two_li.xpath('./div[@class="content"]/p/a'):
                    item = self._build_leaf_item(first_level, two_level, three_li)
                    if not self._audit_leaf(item, url):
                        return

    def _build_leaf_item(self, first_level, two_level, three_li):
        """Build the work item (names, listing URL, POST payload) for one leaf node."""
        three_level = three_li.xpath('./text()').get()
        three_url = three_li.xpath('./@href').get()
        url_next = str(three_url).replace('.html', '') + "_p1.html"
        ids = re.findall(r'chanpin-(\d+)', url_next)
        id_ = ids[0] if ids else ''
        return {
            "first_level_product_name": first_level,
            "two_level_product_name": two_level,
            "three_level_product_name": three_level,
            "url_next": url_next,
            "post_data": {
                "T": id_, "P": "1", "PID": "0", "CID": "0", "TID": "3", "Sort": "1",
                "FldSort": "0", "PriceStart": "0", "PriceEnd": "0", "PBID": "0",
                "K": "", "JustPC": "1", "PP": "0"
            },
            "id": id_,
            "page": 1
        }

    def _audit_leaf(self, item, url):
        """POST one leaf's listing endpoint and compare its count with MySQL.

        Returns False when the endpoint is unreachable (the caller aborts
        the whole crawl, as the original did), True otherwise.
        """
        headers = {
            "content-type": "application/x-www-form-urlencoded",
            "origin": "https://www.jc35.com",
            "referer": "https://www.jc35.com/",
            "user-agent": lua.random
        }
        response_date = self.requests_start(item['url_next'], data=item['post_data'], method='post', headers=headers)
        if not response_date:
            with open('parse_list_date.txt', 'a', encoding='utf-8') as f:
                f.write(f'{url},\n')
            logger.error(f'无法访问：{url}')
            return False

        count = re.findall(r'页(\d+)条记录', response_date.text)
        # Original fell back to '' when the pattern missed, which crashed
        # min(3000, '') / int('') below; 0 keeps the audit running.
        total = int(count[0]) if count else 0
        total_count = min(3000, total)  # the site caps a listing at 3000 items

        save_data = {
            'first_level_product_name': item.get("first_level_product_name", ""),
            'two_level_product_name': item.get("two_level_product_name", ""),
            'three_level_product_name': item.get("three_level_product_name", ""),
            'three_level_product_href': item.get("url_next", ""),
            'type_count': total_count
        }

        logger.info(f'开始查询：{save_data["three_level_product_name"]}')
        sql_count = self.select_count(
            self.cursor,
            save_data['first_level_product_name'],
            save_data['two_level_product_name'],
            save_data["three_level_product_name"]
        )

        # A larger on-site count than the DB count means rows are missing.
        try:
            if int(total_count) > sql_count:
                save_data['sql_count'] = sql_count
                fieldnames = [
                    'first_level_product_name',
                    'two_level_product_name',
                    'three_level_product_name',
                    'three_level_product_href',
                    'type_count',
                    'sql_count'
                ]
                write_header = not os.path.exists('error_type_count.csv')
                with open('error_type_count.csv', 'a', newline='', encoding='utf-8') as f:
                    writer = csv.DictWriter(f, fieldnames=fieldnames)
                    if write_header:
                        writer.writeheader()
                    writer.writerow(save_data)
                logger.error(
                    f'数据缺失: {save_data["first_level_product_name"]}：{save_data["two_level_product_name"]}：{save_data["three_level_product_name"]}：sql:{sql_count}, 网站:{total_count}')
        except Exception as e:
            logger.error(f"写入 error_type_count.csv 失败：{e}")
        return True




if __name__ == "__main__":
    conn_sql = get_db_connection()
    cursor_sql = conn_sql.cursor()
    spider = Jc35Data(conn_sql,cursor_sql)
    spider.parse_list("https://www.jc35.com/Product/")

