import json
import math
import random
import re
import time
import copy
import hashlib
from datetime import datetime

from loguru import logger
from scrapy import Selector
from bs4 import BeautifulSoup, Tag
import mysql.connector
from curl_cffi import requests
from fake_useragent import UserAgent
from redis import StrictRedis
from getcookie import get_cookie
from get_proxy import get_random_proxy

# Shared random user-agent generator (fake_useragent); sampled once per spider instance.
lua = UserAgent()

# NOTE(review): Redis credentials are hard-coded in the connection URL — consider
# moving them to an environment variable or a secrets store. Uses logical DB 14.
REDIS_URL = f'redis://r-2zer2v9lrdl75a694s:tianmai_2018@r-2zer2v9lrdl75a694spd.redis.rds.aliyuncs.com:6379/14'
# decode_responses=True makes the client return str instead of bytes.
conn_redis = StrictRedis.from_url(REDIS_URL, encoding='utf-8', decode_responses=True)

def get_tag_content(soup):
    """Return the visible text of an HTML fragment with whitespace collapsed.

    Accepts either a bs4 Tag or raw HTML (str/bytes); raw input is parsed
    with the built-in 'html.parser' first.
    """
    node = soup if isinstance(soup, Tag) else BeautifulSoup(soup, 'html.parser')
    return ' '.join(node.get_text().split())

def get_md5(content: str) -> str:
    """Return the hexadecimal MD5 digest of *content* encoded as UTF-8."""
    digest = hashlib.md5(content.encode('utf-8'))
    return digest.hexdigest()

class Jc35Data:
    """Scraper for jc35.com product categories.

    Fetches the category-tree page and pushes one task per third-level
    category into the Redis set ``get_jc35_data_new:type_start`` for
    downstream workers to consume.
    """

    def __init__(self):
        # One random desktop user-agent per spider instance.
        self.headers = {
            "user-agent": lua.random
        }
        self.session = requests.Session()
        # Cookie jar; refreshed via get_cookie() when the site answers 468.
        self.cookies = {}

    def requests_start(self, url, **kwargs):
        """Fetch *url* with retries, impersonating Chrome via curl_cffi.

        Keyword args:
            method:  'post' (case-insensitive) for a POST; anything else is GET.
            params:  query-string dict for GET requests.
            headers: header override for POST requests (defaults to self.headers).
            data:    form body for POST requests.

        Returns the Response on HTTP 200; after exhausting all attempts,
        returns the last non-200 response or False if no request succeeded
        (callers test the result's truthiness).
        """
        response = False
        max_count = 100
        # proxy = get_random_proxy()
        # Fix: original used range(1, max_count), which made only 99 attempts.
        for attempt in range(1, max_count + 1):
            time.sleep(random.randint(1, 2))
            try:
                logger.info(f'尝试第{attempt}次请求：{url}')
                method = kwargs.get('method', '')
                if method.lower() != 'post':
                    params = kwargs.get('params', '')
                    if params:
                        response = requests.get(url, headers=self.headers, params=params, timeout=10, impersonate="chrome", cookies=self.cookies)
                    else:
                        response = requests.get(url, headers=self.headers, timeout=10, impersonate="chrome", cookies=self.cookies)
                else:
                    response = requests.post(url, headers=kwargs.get('headers', self.headers), data=kwargs.get('data', ''), timeout=10, impersonate="chrome", cookies=self.cookies)
                if response.status_code == 200:
                    return response
                if response.status_code == 468:
                    # 468 appears to be the site's anti-bot status: back off and
                    # refresh cookies before retrying.
                    time.sleep(random.randint(2, 3))
                    # proxy = get_random_proxy()
                    self.cookies = get_cookie({})
                else:
                    time.sleep(random.randint(2, 3))
                    # proxy = get_random_proxy()
            except Exception as e:
                # Fix: was a bare `except:` that swallowed KeyboardInterrupt /
                # SystemExit and logged nothing; keep the best-effort retry but
                # record the failure.
                logger.error(f'请求异常：{url}, 错误: {e}')
                time.sleep(random.randint(2, 3))
                # proxy = get_random_proxy()
        return response

    def parse_list(self, url):
        """Parse the category page at *url* and enqueue third-level categories.

        Skips the top-level category '金属切削机床'. Unreachable URLs are
        appended to error_save_parse_list.txt for manual replay. Each task is
        pushed to Redis with unbounded retry (losing a task is considered
        worse than blocking).
        """
        response_ = self.requests_start(url)
        if not response_:
            # Record the failed URL so the run can be replayed later.
            with open('error_save_parse_list.txt', 'a', encoding='utf-8') as f:
                f.write(f'{url},\n')
            logger.error(f'无法访问：{url}')
            return

        root = Selector(text=response_.text)
        lis = root.xpath('//div[@class="product-classify w260"]/ul/li')
        # Hoisted: the original called re.findall() twice per link.
        id_pattern = re.compile(r'chanpin-(\d+)')

        for li in lis:
            first_level = li.xpath('./div[@class="class-a"]/p/a/text()').get()
            if first_level == '金属切削机床':
                continue
            two_lis = li.xpath('./div[@class="class-b"]/div[@class="left-box"]/div[@class="item"]')
            for two_li in two_lis:
                two_level = two_li.xpath('./div[@class="title"]/p/a/text()').get()
                three_lis = two_li.xpath('./div[@class="content"]/p/a')
                for three_li in three_lis:
                    three_level = three_li.xpath('./text()').get()
                    three_url = three_li.xpath('./@href').get()
                    # e.g. .../chanpin-123.html -> .../chanpin-123_p1.html (page 1)
                    url_next = str(three_url).replace('.html', '') + "_p1.html"
                    match = id_pattern.search(url_next)
                    id_ = match.group(1) if match else ''
                    # POST payload understood by the site's listing endpoint.
                    data = {
                        "T": id_, "P": "1", "PID": "0", "CID": "0", "TID": "3", "Sort": "1",
                        "FldSort": "0", "PriceStart": "0", "PriceEnd": "0", "PBID": "0",
                        "K": "", "JustPC": "1", "PP": "0"
                    }
                    item = {
                        "first_level_product_name": first_level,
                        "two_level_product_name": two_level,
                        "three_level_product_name": three_level,
                        "url_next": url_next,
                        "post_data": data,
                        "id": id_,
                        "page": 1
                    }
                    # Push to Redis with automatic retry until it succeeds.
                    while True:
                        try:
                            conn_redis.sadd('get_jc35_data_new:type_start', json.dumps(item, ensure_ascii=False))
                            logger.info(f'推送Redis成功：{item["first_level_product_name"]}:{item["two_level_product_name"]}:{item["three_level_product_name"]}')
                            break
                        except Exception as e:
                            logger.error(f'Redis推送失败：{item["first_level_product_name"]}:{item["two_level_product_name"]}:{item["three_level_product_name"]}, 错误: {e}，重试中...')
                            time.sleep(2)


if __name__ == "__main__":
    # Script entry point: crawl the jc35.com product-category tree once.
    Jc35Data().parse_list("https://www.jc35.com/Product/")

