import json
import math
import random
import re
import time
import copy
import hashlib
from datetime import datetime

from loguru import logger
from scrapy import Selector
from bs4 import BeautifulSoup, Tag
import mysql.connector
from curl_cffi import requests
from fake_useragent import UserAgent
from redis import StrictRedis
from getcookie import get_cookie
from get_proxy import get_random_proxy

# Shared UserAgent generator; used to randomize the "user-agent" header.
lua = UserAgent()

# SECURITY NOTE(review): Redis credentials are hard-coded in source — move
# them to environment variables or a secrets store.
REDIS_URL = f'redis://r-2zer2v9lrdl75a694s:tianmai_2018@r-2zer2v9lrdl75a694spd.redis.rds.aliyuncs.com:6379/14'
# decode_responses=True so Redis values come back as str rather than bytes.
conn_redis = StrictRedis.from_url(REDIS_URL, encoding='utf-8', decode_responses=True)

def get_tag_content(soup):
    """Return the whitespace-normalized plain text of an HTML fragment.

    Accepts either a raw HTML string or an already-parsed bs4 Tag; runs of
    whitespace in the extracted text are collapsed to single spaces.
    """
    node = soup if isinstance(soup, Tag) else BeautifulSoup(soup, 'html.parser')
    return ' '.join(node.get_text().split())

def get_md5(content: str) -> str:
    """Return the hexadecimal MD5 digest of *content* encoded as UTF-8."""
    digest = hashlib.md5(content.encode('utf-8'))
    return digest.hexdigest()

class Jc35Data:
    """Crawler for jc35.com product listings.

    Walks the product-category tree, diffs it against the categories already
    stored in MySQL (deleting stale ones), and pushes each product row of the
    new categories into a Redis set for downstream workers.
    """

    def __init__(self):
        self.headers = {
            "user-agent": lua.random
        }
        # NOTE(review): this session is never used — every request below goes
        # through the module-level curl_cffi `requests` helpers instead.
        self.session = requests.Session()
        self.cookies = {}

    def requests_start(self, url, **kwargs):
        """Fetch *url* with up to 50 retries, rotating proxies on failure.

        Keyword args:
            method:  'post' for a POST request; anything else is a GET.
            params:  optional query-string dict for GET requests.
            data / headers: POST payload and header override.

        Returns the HTTP-200 response on success, or False when every
        attempt failed.
        """
        max_count = 51  # up to 50 attempts
        proxy = get_random_proxy()
        for i in range(1, max_count):
            time.sleep(random.randint(2, 5))  # throttle between attempts
            try:
                logger.info(f'尝试第{i}次请求：{url}，代理：{proxy}')
                method = kwargs.get('method', '')
                if method.lower() != 'post':
                    params = kwargs.get('params', '')
                    if params:
                        response = requests.get(url, headers=self.headers, params=params, timeout=10, impersonate="chrome", proxies=proxy, cookies=self.cookies)
                    else:
                        response = requests.get(url, headers=self.headers, timeout=10, impersonate="chrome", proxies=proxy, cookies=self.cookies)
                else:
                    response = requests.post(url, headers=kwargs.get('headers', self.headers), data=kwargs.get('data', ''), timeout=10, impersonate="chrome", proxies=proxy, cookies=self.cookies)
                if response.status_code == 200:
                    return response
                elif response.status_code == 468:
                    # 468 — presumably the site's anti-bot code: refresh the
                    # cookie jar as well as the proxy. TODO confirm semantics.
                    time.sleep(random.randint(10, 20))
                    proxy = get_random_proxy()
                    self.cookies = get_cookie(proxy)
                else:
                    time.sleep(random.randint(10, 20))
                    proxy = get_random_proxy()
            except Exception as e:
                # Was a bare `except:` that also swallowed KeyboardInterrupt
                # and hid the failure cause — keep the retry, log the error.
                logger.error(f'请求异常：{url}, 错误: {e}')
                time.sleep(random.randint(10, 20))
                proxy = get_random_proxy()
        # All attempts exhausted. Return False explicitly: the old code
        # returned the last non-200 Response object, which is truthy and
        # defeated the callers' `if not response` failure checks.
        return False

    def clean_and_filter_categories(self, cursor, collected_items):
        """Diff freshly collected categories against the MySQL table.

        Deletes rows whose (two, three) level names no longer appear on the
        site for this first-level category, and returns only the collected
        items that are not yet stored. The caller commits the deletes.
        """
        if not collected_items:
            # Nothing scraped for this branch: nothing to diff or delete.
            return []
        first_level = collected_items[0]['first_level_product_name']
        query = """
            SELECT two_level_product_name, three_level_product_name
            FROM machine_tool_site
            WHERE first_level_product_name = %s
        """
        cursor.execute(query, (first_level,))
        existing_items = cursor.fetchall()
        collected_set = {
            (d['two_level_product_name'], d['three_level_product_name']) for d in collected_items
        }
        existing_set = set(existing_items)
        # Categories present in the DB but gone from the site → delete.
        to_delete = existing_set - collected_set
        for two, three in to_delete:
            del_sql = """
                DELETE FROM machine_tool_site 
                WHERE first_level_product_name = %s AND two_level_product_name = %s AND three_level_product_name = %s
            """
            cursor.execute(del_sql, (first_level, two, three))
            logger.info(f"删除旧类目数据：{first_level} - {two} - {three}")
        # Categories on the site but not in the DB → collect.
        to_collect = collected_set - existing_set
        return [item for item in collected_items if (item['two_level_product_name'], item['three_level_product_name']) in to_collect]

    def parse_list(self, url):
        """Crawl the category index at *url* and dispatch new categories.

        For each first-level category, scrapes its two/three-level children,
        diffs them against MySQL, and crawls the listing pages of every
        category not yet stored.
        """
        response_ = self.requests_start(url)
        if not response_:
            with open('error_save_parse_list.txt', 'a', encoding='utf-8') as f:
                f.write(f'{url},\n')
            logger.error(f'无法访问：{url}')
            return

        root = Selector(text=response_.text)
        lis = root.xpath('//div[@class="product-classify w260"]/ul/li')
        # SECURITY NOTE(review): DB credentials are hard-coded in source —
        # move them to configuration / a secrets store.
        conn_db = mysql.connector.connect(
            host='pc-2ze9oh2diu5e5firh.rwlb.rds.aliyuncs.com',
            user='data_collection',
            port=3306,
            password='CRNabzFQ2H',
            database='internal_collection',
            charset='utf8mb4'
        )
        cursor = conn_db.cursor()
        try:
            for li in lis:
                first_level = li.xpath('./div[@class="class-a"]/p/a/text()').get()
                two_lis = li.xpath('./div[@class="class-b"]/div[@class="left-box"]/div[@class="item"]')
                collected_items = []
                for two_li in two_lis:
                    two_level = two_li.xpath('./div[@class="title"]/p/a/text()').get()
                    three_lis = two_li.xpath('./div[@class="content"]/p/a')
                    for three_li in three_lis:
                        three_level = three_li.xpath('./text()').get()
                        three_url = three_li.xpath('./@href').get()
                        url_next = str(three_url).replace('.html', '') + "_p1.html"
                        # Hoisted: the old code ran the same findall twice.
                        ids = re.findall(r'chanpin-(\d+)', url_next)
                        id_ = ids[0] if ids else ''
                        data = {
                            "T": id_, "P": "1", "PID": "0", "CID": "0", "TID": "3", "Sort": "1",
                            "FldSort": "0", "PriceStart": "0", "PriceEnd": "0", "PBID": "0",
                            "K": "", "JustPC": "1", "PP": "0"
                        }
                        collected_items.append({
                            "first_level_product_name": first_level,
                            "two_level_product_name": two_level,
                            "three_level_product_name": three_level,
                            "url_next": url_next,
                            "post_data": data,
                            "id": id_,
                            "page": 1
                        })
                items_to_collect = self.clean_and_filter_categories(cursor, collected_items)
                conn_db.commit()  # persist the stale-category deletions
                for item in items_to_collect:
                    self.parse_list_date(item['url_next'], item)
        finally:
            # Was leaked if any category crawl raised — always release the
            # cursor and connection.
            cursor.close()
            conn_db.close()

    def parse_list_date(self, url, meta):
        """Fetch one listing page for a category, push each product row to
        Redis, then recurse into the next page (hard-capped at page 100)."""
        type_dict_ = copy.deepcopy(meta)
        headers = {
            "content-type": "application/x-www-form-urlencoded",
            "origin": "https://www.jc35.com",
            "referer": "https://www.jc35.com/",
            "user-agent": lua.random
        }
        response_date = self.requests_start(url, data=meta['post_data'], method='post', headers=headers)
        if not response_date:
            with open('parse_list_date.txt', 'a', encoding='utf-8') as f:
                f.write(f'{url},\n')
            logger.error(f'无法访问：{url}')
            return

        root = Selector(text=response_date.text)
        lis = root.xpath('//div[@class="pro-list productsLists"]/ul/li')
        for li in lis:
            # Product name may sit directly in <p> or inside <p><a>.
            product_name = li.xpath('./div[@class="item"]/p/text()').get('').strip() or li.xpath('./div[@class="item"]/p/a/text()').get('').strip()
            company_name = li.xpath('./div[@class="item"]/div[@class="company"]/a/text()').get('').strip()
            member_level = li.xpath('./div[@class="item"]/div[@class="level"]/b/@class').get('').strip()
            member_year = ''.join(li.xpath('./div[@class="item"]/div[@class="year"]//text()').getall()).strip()
            details_url = li.xpath('./div[@class="item"]/a[@class="proLink"]/@href').get('').strip()
            type_dict_next = copy.deepcopy(meta)
            type_dict_next.update({
                'product_name': product_name,
                'company_name': company_name,
                'member_level': member_level,
                'member_year': member_year,
                'details_url': details_url
            })
            try:
                conn_redis.sadd('get_jc35_data_new:pages', json.dumps(type_dict_next, ensure_ascii=False))
                logger.info(f'推送Redis成功：{product_name}')
            except Exception as e:
                logger.error(f'Redis推送失败：{product_name}, 错误: {e}')

        # Raw string: `\d` in a plain literal is a SyntaxWarning on 3.12+.
        count = re.findall(r'页(\d+)条记录', response_date.text)
        total = math.ceil(int(count[0]) / 35) if count else 1  # 35 rows/page
        # `<` (was `<=`): the old condition requested one page PAST the last
        # page — e.g. page 2 of a 1-page category, and page 101 at the cap.
        if type_dict_['page'] < min(100, total):
            type_dict_['page'] += 1
            url_next_ = f'https://www.jc35.com/chanpin-{type_dict_["id"]}_p{type_dict_["page"]}.html'
            type_dict_['post_data']['P'] = str(type_dict_['page'])
            self.parse_list_date(url_next_, type_dict_)

if __name__ == "__main__":
    # Entry point: crawl the full product-category index page.
    spider = Jc35Data()
    spider.parse_list("https://www.jc35.com/Product/")
