import csv
import json
import math
import random
import re
import time
import copy
import hashlib
from datetime import datetime

from loguru import logger
from scrapy import Selector
from bs4 import BeautifulSoup, Tag
import mysql.connector
from curl_cffi import requests
from fake_useragent import UserAgent
from redis import StrictRedis
from getcookie import get_cookie
from get_proxy import get_random_proxy

# Shared fake-useragent generator; each spider instance samples one UA from it.
lua = UserAgent()

# NOTE(review): Redis credentials are hardcoded in the URL — move to env/config.
REDIS_URL = f'redis://r-2zer2v9lrdl75a694s:tianmai_2018@r-2zer2v9lrdl75a694spd.redis.rds.aliyuncs.com:6379/14'
# Module-level client (db 14); decode_responses=True makes reads return str, not bytes.
conn_redis = StrictRedis.from_url(REDIS_URL, encoding='utf-8', decode_responses=True)

def get_tag_content(soup):
    """Return the whitespace-normalized plain text of markup or a bs4 Tag.

    Accepts either a raw markup string (parsed with html.parser) or an
    already-parsed ``Tag``; collapses all runs of whitespace to single spaces.
    """
    tag = soup if isinstance(soup, Tag) else BeautifulSoup(soup, 'html.parser')
    return ' '.join(tag.get_text().split())

def get_md5(content: str) -> str:
    """Return the hexadecimal MD5 digest of *content* (UTF-8 encoded)."""
    digest = hashlib.md5()
    digest.update(content.encode('utf-8'))
    return digest.hexdigest()

class Jc35Data:
    """Spider for jc35.com product categories.

    ``parse_list`` reads category rows from ``error_type_count.csv`` and
    enqueues one crawl task per third-level category into a Redis set;
    ``requests_start`` is the shared retrying HTTP fetcher.
    """

    def __init__(self):
        # One random user-agent, fixed for this spider instance.
        self.headers = {
            "user-agent": lua.random
        }
        # NOTE(review): this session is never used below — all calls go through
        # the module-level requests API; confirm it can be removed.
        self.session = requests.Session()
        # Cookie jar, refreshed by get_cookie() whenever the site answers 468.
        self.cookies = {}

    def requests_start(self, url, **kwargs):
        """GET or POST *url*, retrying up to ``max_count`` times.

        Keyword args:
            method:  'post' (case-insensitive) to POST; anything else GETs.
            params:  optional query params for GET.
            headers: optional header override for POST (defaults to self.headers).
            data:    optional form body for POST.

        Returns:
            The Response on HTTP 200, otherwise the last ``response`` value
            after all attempts (False if no request ever completed).
        """
        response = False
        max_count = 100
        # proxy = get_random_proxy()
        # fix: range(1, max_count) only made 99 attempts despite max_count=100
        for attempt in range(1, max_count + 1):
            time.sleep(random.randint(1, 2))
            try:
                logger.info(f'尝试第{attempt}次请求：{url}')
                method = kwargs.get('method', '')
                if method.lower() != 'post':
                    params = kwargs.get('params', '')
                    if params:
                        response = requests.get(url, headers=self.headers, params=params, timeout=10, impersonate="chrome", cookies=self.cookies)
                    else:
                        response = requests.get(url, headers=self.headers, timeout=10, impersonate="chrome", cookies=self.cookies)
                else:
                    response = requests.post(url, headers=kwargs.get('headers', self.headers), data=kwargs.get('data', ''), timeout=10, impersonate="chrome", cookies=self.cookies)
                if response.status_code == 200:
                    return response
                elif response.status_code == 468:
                    # 468 appears to be the site's anti-bot status: back off
                    # and refresh cookies before retrying.
                    time.sleep(random.randint(2, 3))
                    # proxy = get_random_proxy()
                    self.cookies = get_cookie({})
                else:
                    time.sleep(random.randint(2, 3))
                    # proxy = get_random_proxy()
            # fix: bare `except:` also swallowed KeyboardInterrupt/SystemExit
            # and hid every error; narrow to Exception and log it.
            except Exception as e:
                logger.error(f'请求异常：{url}, 错误: {e}')
                time.sleep(random.randint(2, 3))
                # proxy = get_random_proxy()
        return response

    def parse_list(self, _):  # parameter kept for interface compatibility; unused
        """Read categories from error_type_count.csv and enqueue crawl tasks.

        For each CSV row with a usable third-level URL, extracts the numeric
        category id (``chanpin-<id>`` in the URL) and sadd's a JSON task dict
        into the ``get_jc35_data_new:type_start`` Redis set, retrying forever
        on Redis errors. Rows missing a URL or id are logged and skipped.
        """
        with open('error_type_count.csv', 'r', encoding='utf-8') as f:
            reader = csv.DictReader(f)
            for row in reader:
                first_level = row['first_level_product_name']
                two_level = row['two_level_product_name']
                three_level = row['three_level_product_name']
                three_url = row['three_level_product_href']

                if not three_url:
                    logger.warning(f"缺失链接: {first_level} > {two_level} > {three_level}")
                    continue

                url_next = three_url
                # Category id is embedded in URLs like .../chanpin-1234/
                id_matches = re.findall(r'chanpin-(\d+)', url_next)
                if not id_matches:
                    logger.warning(f"无法提取ID: {url_next}")
                    continue
                id_ = id_matches[0]

                data = {
                    "T": id_, "P": "1", "PID": "0", "CID": "0", "TID": "3", "Sort": "1",
                    "FldSort": "0", "PriceStart": "0", "PriceEnd": "0", "PBID": "0",
                    "K": "", "JustPC": "1", "PP": "0"
                }
                item = {
                    "first_level_product_name": first_level,
                    "two_level_product_name": two_level,
                    "three_level_product_name": three_level,
                    "url_next": url_next,
                    "post_data": data,
                    "id": id_,
                    "page": 1
                }
                # Push to Redis with unbounded retry so a transient outage
                # does not silently drop tasks.
                while True:
                    try:
                        conn_redis.sadd('get_jc35_data_new:type_start', json.dumps(item, ensure_ascii=False))
                        logger.info(f'推送Redis成功：{item["first_level_product_name"]}:{item["two_level_product_name"]}:{item["three_level_product_name"]}')
                        break
                    except Exception as e:
                        logger.error(f'Redis推送失败：{item["first_level_product_name"]}:{item["two_level_product_name"]}:{item["three_level_product_name"]}, 错误: {e}，重试中...')
                        time.sleep(2)


if __name__ == "__main__":
    # Entry point: build the spider and enqueue every category task.
    Jc35Data().parse_list("https://www.jc35.com/Product/")

