import json
import math
import random
import re
import time
import copy
import hashlib
from datetime import datetime

from loguru import logger
from scrapy import Selector
from bs4 import BeautifulSoup, Tag
import mysql.connector
from curl_cffi import requests
from fake_useragent import UserAgent
from redis import StrictRedis
from getcookie import get_cookie
from get_proxy import get_random_proxy

lua = UserAgent()  # shared random User-Agent generator used by every request

# NOTE(review): credentials are hard-coded in the URL — move them to an env
# var / config file. The f-string prefix is unnecessary (no placeholders).
REDIS_URL = f'redis://r-2zer2v9lrdl75a694s:tianmai_2018@r-2zer2v9lrdl75a694spd.redis.rds.aliyuncs.com:6379/14'
# decode_responses=True makes spop/sadd return/accept str instead of bytes.
conn_redis = StrictRedis.from_url(REDIS_URL, encoding='utf-8', decode_responses=True)

def get_tag_content(soup):
    """Return the whitespace-normalized visible text of some HTML.

    Accepts either raw markup (parsed with html.parser) or an
    already-parsed bs4 ``Tag``; runs of whitespace collapse to one space.
    """
    if not isinstance(soup, Tag):
        soup = BeautifulSoup(soup, 'html.parser')
    return ' '.join(soup.get_text().split())

def get_md5(content: str):
    """Return the hex MD5 digest of ``content`` encoded as UTF-8."""
    digest = hashlib.md5()
    digest.update(content.encode('utf-8'))
    return digest.hexdigest()

class Jc35Data:
    """Scraper for jc35.com product-list pages.

    Fetches paginated category listings, extracts per-product fields, and
    pushes each record into a Redis set for downstream detail crawling.
    """

    def __init__(self):
        # Random UA per spider instance to reduce trivial bot detection.
        self.headers = {
            "user-agent": lua.random
        }
        # NOTE(review): this session is never used — all requests below go
        # through module-level curl_cffi calls. Kept to preserve interface.
        self.session = requests.Session()
        # Anti-bot cookies; refreshed via get_cookie() when the site
        # answers with HTTP 468.
        self.cookies = {}

    def requests_start(self, url, **kwargs):
        """Fetch ``url`` with up to ``max_count`` retries.

        kwargs:
            method:  'post' (case-insensitive) issues a POST; anything
                     else issues a GET.
            params:  optional query params for GET.
            data:    optional form body for POST.
            headers: optional header override for POST only.

        Returns the Response on HTTP 200; otherwise the last response
        received, or False if every attempt raised.
        """
        response = False
        max_count = 100
        # proxy = get_random_proxy()
        # Fix: range(1, max_count) made only 99 of the advertised 100 attempts.
        for i in range(1, max_count + 1):
            time.sleep(random.randint(1, 2))
            try:
                logger.info(f'尝试第{i}次请求：{url}')
                method = kwargs.get('method', '')
                if method.lower() != 'post':
                    params = kwargs.get('params', '')
                    if params:
                        response = requests.get(url, headers=self.headers, params=params, timeout=10, impersonate="chrome", cookies=self.cookies)
                    else:
                        response = requests.get(url, headers=self.headers, timeout=10, impersonate="chrome", cookies=self.cookies)
                else:
                    response = requests.post(url, headers=kwargs.get('headers', self.headers), data=kwargs.get('data', ''), timeout=10, impersonate="chrome", cookies=self.cookies)
                if response.status_code == 200:
                    return response
                elif response.status_code == 468:
                    # 468 is the site's anti-bot challenge: back off, refresh
                    # cookies, then retry.
                    time.sleep(random.randint(2, 3))
                    # proxy = get_random_proxy()
                    self.cookies = get_cookie({})
                else:
                    time.sleep(random.randint(2, 3))
                    # proxy = get_random_proxy()
            except Exception:
                # Fix: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit and made the crawler unstoppable.
                time.sleep(random.randint(2, 3))
                # proxy = get_random_proxy()
        return response

    def parse_list_date(self, url, meta):
        """Scrape one listing page, push each product row to Redis, then
        follow pagination (capped at 100 pages) by recursing into the
        next page.

        ``meta`` must carry 'post_data' (form dict with page key 'P'),
        'page', 'id', and the three *_level_product_name keys used in the
        log lines. Unreachable URLs are appended to parse_list_date.txt
        for manual replay.
        """
        type_dict_ = copy.deepcopy(meta)
        headers = {
            "content-type": "application/x-www-form-urlencoded",
            "origin": "https://www.jc35.com",
            "referer": "https://www.jc35.com/",
            "user-agent": lua.random
        }
        response_date = self.requests_start(url, data=meta['post_data'], method='post', headers=headers)
        if not response_date:
            # Persist permanently-failed URLs so they can be replayed later.
            with open('parse_list_date.txt', 'a', encoding='utf-8') as f:
                f.write(f'{url},\n')
            logger.error(f'无法访问：{url}')
            return

        root = Selector(text=response_date.text)
        lis = root.xpath('//div[@class="pro-list productsLists"]/ul/li')
        for li in lis:
            # Product name sits either directly in <p> or inside <p><a>.
            product_name = li.xpath('./div[@class="item"]/p/text()').get('').strip() or li.xpath('./div[@class="item"]/p/a/text()').get('').strip()
            company_name = li.xpath('./div[@class="item"]/div[@class="company"]/a/text()').get('').strip()
            member_level = li.xpath('./div[@class="item"]/div[@class="level"]/b/@class').get('').strip()
            member_year = ''.join(li.xpath('./div[@class="item"]/div[@class="year"]//text()').getall()).strip()
            details_url = li.xpath('./div[@class="item"]/a[@class="proLink"]/@href').get('').strip()
            type_dict_next = copy.deepcopy(meta)
            type_dict_next.update({
                'product_name': product_name,
                'company_name': company_name,
                'member_level': member_level,
                'member_year': member_year,
                'details_url': details_url
            })
            # Push to Redis with unbounded retry — a dropped record here
            # would be lost for good, so we block until the push succeeds.
            while True:
                try:
                    conn_redis.sadd('get_jc35_data_new:pages', json.dumps(type_dict_next, ensure_ascii=False))
                    logger.info(f'推送Redis成功：{type_dict_next["first_level_product_name"]}:{type_dict_next["two_level_product_name"]}:{type_dict_next["three_level_product_name"]}')
                    break
                except Exception as e:
                    logger.error(f'Redis推送失败：{type_dict_next["first_level_product_name"]}:{type_dict_next["two_level_product_name"]}:{type_dict_next["three_level_product_name"]}, 错误: {e}，重试中...')
                    time.sleep(2)

        # Fix: raw string for the regex — '\d' in a plain literal is an
        # invalid escape (SyntaxWarning on Python >= 3.12).
        count = re.findall(r'共(\d+)页', response_date.text)
        # count = re.findall('页(\d+)条记录', response_date.text)
        total = int(count[0]) if count else 1
        # NOTE(review): `<=` still recurses when page == total, so one page
        # past the last is fetched (presumably an empty list). Confirm site
        # behavior before tightening to `<`.
        if type_dict_['page'] <= min(100, total):
            type_dict_['page'] += 1
            url_next_ = f'https://www.jc35.com/chanpin-{type_dict_["id"]}_p{type_dict_["page"]}.html'
            type_dict_['post_data']['P'] = str(type_dict_['page'])
            # Recursion depth is bounded by the 100-page cap, so this is safe.
            self.parse_list_date(url_next_, type_dict_)

def turn_page():
    """Drain the Redis start-URL set and crawl each category's list pages.

    Entries that fail with any exception are pushed back onto the start
    set so a later run can retry them.
    """
    spider = Jc35Data()
    while True:
        raw = conn_redis.spop('get_jc35_data_new:type_start')
        if not raw:
            break  # queue drained
        entry = json.loads(raw)
        try:
            spider.parse_list_date(entry['url_next'], entry)
        except Exception as e:
            logger.error(f'请求错误：{e}')
            # Re-queue the raw payload for a future attempt.
            conn_redis.sadd('get_jc35_data_new:type_start', raw)



if __name__ == "__main__":
    # Entry point: consume the Redis start queue and scrape listing pages.
    turn_page()

