import json
import random
import time
from datetime import datetime

import copy
import hashlib
from utils.get_text import get_tag_content
from loguru import logger
from scrapy import Selector
from bs4 import BeautifulSoup, NavigableString, Tag, Comment
from dp_verify_code import verify_page
from DrissionPage import ChromiumOptions, Chromium
from redis import StrictRedis
import mysql.connector

# NOTE(review): Redis and MySQL credentials are hardcoded below — consider
# moving them into environment variables or a config file before sharing.
REDIS_URL = f'redis://r-2zer2v9lrdl75a694s:tianmai_2018@r-2zer2v9lrdl75a694spd.redis.rds.aliyuncs.com:6379/14'
# Module-wide Redis connection; used as the work queue of page URLs (see turn_page).
conn = StrictRedis.from_url(REDIS_URL, encoding='utf-8', decode_responses=True)

# Database connection settings.
# Local development configuration, kept for reference:
# db_config = {
#     'host': '127.0.0.1',  # MySQL server address
#     'user': 'root',  # MySQL username
#     'password': '1234',  # MySQL password
#     'database': 'tender',  # MySQL database name
# }

db_config = {
    'host': 'pc-2ze9oh2diu5e5firh.rwlb.rds.aliyuncs.com',  # MySQL server address
    'user': 'data_collection',  # MySQL username
    'password': 'CRNabzFQ2H',  # MySQL password
    'database': 'cxzx_xm',  # MySQL database name
}

# Connect to the MySQL database
def get_db_connection():
    """Open and return a new MySQL connection built from module-level db_config."""
    connection = mysql.connector.connect(**db_config)
    return connection


# Insert data into MySQL
def insert_data_to_mysql(data):
    """Insert one product record into the `gaidehuagong` table.

    Args:
        data (dict): Record containing the keys pmid, brand_name,
            businessCreditCode, businessScope, productDetail,
            companyIntroduction, company and agathertime.

    Failures are logged (not re-raised); the cursor and connection are
    always closed.
    """
    connection = get_db_connection()
    cursor = connection.cursor()
    try:
        insert_query = """
        INSERT INTO gaidehuagong 
        (pmid,brand_name, businessCreditCode, businessScope, productDetail, companyIntroduction, company, agathertime)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
        """

        # Key order must mirror the column order in the INSERT statement above.
        field_order = (
            'pmid',
            'brand_name',
            'businessCreditCode',
            'businessScope',
            'productDetail',
            'companyIntroduction',
            'company',
            'agathertime',
        )
        values = tuple(data[key] for key in field_order)

        cursor.execute(insert_query, values)
        connection.commit()
        logger.info(f"成功插入数据: {data['brand_name']}")

    except mysql.connector.Error as err:
        logger.error(f"插入数据失败: {err}")
    finally:
        cursor.close()
        connection.close()


def get_tag_content(soup):
    """Extract whitespace-normalized plain text from an HTML fragment.

    NOTE(review): this definition shadows the `get_tag_content` imported
    from utils.get_text at the top of the file — confirm which one is
    intended.

    Args:
        soup: An HTML string, a bs4 Tag, or None (e.g. the result of a
            failed `Selector.get()`).

    Returns:
        str: The text content with runs of whitespace collapsed to single
        spaces; '' when *soup* is None or empty.
    """
    # Guard: callers pass raw Selector.get() results which may be None;
    # BeautifulSoup(None) would raise, so return '' instead.
    if not soup:
        return ''
    if not isinstance(soup, Tag):
        soup = BeautifulSoup(soup, 'html.parser')
    # Extract plain text and collapse all whitespace runs.
    return ' '.join(soup.get_text().split())


def get_md5(content: str):
    """Return the hexadecimal MD5 digest of *content*.

    Args:
        content (str): Text to hash; encoded as UTF-8 before digesting.
    """
    digest = hashlib.md5()
    digest.update(content.encode(encoding='utf-8'))
    return digest.hexdigest()


def select_item(item: dict):
    """Count existing rows whose pmid matches item['pmid'].

    Args:
        item (dict): Must contain a 'pmid' key.

    Returns:
        tuple: A 1-tuple from fetchone(), e.g. (0,) when the pmid is new.
    """
    connection = get_db_connection()
    cursor = connection.cursor()
    try:
        # Parameterized query: the original interpolated the value into the
        # SQL string with an f-string, which is injection-prone.
        sql = "select count(pmid) as mid_count from gaidehuagong where pmid = %s"
        cursor.execute(sql, (item['pmid'],))
        return cursor.fetchone()
    finally:
        # The original leaked the cursor and connection on every call.
        cursor.close()
        connection.close()


class ChinaguidechemProductSpider:
    """Scrapes product and supplier data from china.guidechem.com.

    Pages are fetched through a DrissionPage-driven Chromium browser
    (captchas handled by verify_page); parsed records are written to
    MySQL via insert_data_to_mysql.
    """

    def __init__(self):
        # Kept for reference; actual requests go through the browser below.
        self.headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "accept-language": "zh-CN,zh;q=0.9",
            "cache-control": "max-age=0",
            "content-type": "application/x-www-form-urlencoded",
            "origin": "https://china.guidechem.com",
            "priority": "u=0, i",
            "referer": "https://china.guidechem.com/product/listc_catid-2512-p3.html",
            "sec-ch-ua": "\"Microsoft Edge\";v=\"137\", \"Chromium\";v=\"137\", \"Not/A)Brand\";v=\"24\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "document",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "same-origin",
            "upgrade-insecure-requests": "1",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0"
        }
        # Launch a fresh incognito Chromium instance on an automatic port.
        co = ChromiumOptions().auto_port()
        co.set_argument('--incognito')
        browser = Chromium(co)
        self.tab = browser.get_tab()

    def requests_start(self, url, **kwargs):
        """Load *url* in the browser tab, solving slider/click captchas.

        Retries up to max_count times with randomized delays.

        Returns:
            str | bool: Page HTML on success, False when all attempts fail.
        """
        response = False
        max_count = 10
        # Fix: original used range(1, max_count), making only 9 of the
        # intended 10 attempts.
        for attempt in range(1, max_count + 1):
            time.sleep(random.randint(2, 5))
            try:
                logger.info(f'尝试第{attempt}次请求：{url}')
                self.tab.get(url)
                self.tab.wait.load_start()
                time.sleep(2)
                html = str(self.tab.html)
                if '拖动滑块完成拼图' in html or '请依次点击' in html:
                    # Captcha detected: hand off to the solver, then re-check.
                    verify_page(url, self.tab)
                    self.tab.wait.load_start()
                    time.sleep(2)
                    html = str(self.tab.html)
                    # Fix: original used `or`, which accepted the page as soon
                    # as EITHER captcha marker was absent. Both must be gone.
                    if ('拖动滑块完成拼图' not in html) and ('请依次点击' not in html):
                        response = self.tab.html
                        break
                    else:
                        continue
                else:
                    response = self.tab.html
                    break
            except Exception as exc:
                # Narrowed from a bare `except:`; log so failures are visible.
                logger.warning(f'请求异常: {exc}')
                continue

        return response

    def start_requests(self, urls):
        """Entry point: crawl each listing-page URL in *urls*."""
        for url in urls:
            type_dict = {
                "for_count": 1,
                "page": 1,
            }
            self.parse_list(url, type_dict)

    def parse_list(self, url, meta):
        """Parse a product listing page; dispatch supplier lookups for new pmids.

        Raises:
            Exception: When the page cannot be fetched (URL is appended to
                error_save_parse_list.txt first).
        """
        response_ = self.requests_start(url)
        if not response_:
            with open('error_save_parse_list.txt', 'a', encoding='utf-8') as f:
                f.write(f'{str(url)},\n')
                raise Exception('无法访问！！！')
        response = Selector(text=response_)
        lis = response.xpath('//div[@class="seaec_titl_nav_list"]/dl/dd')
        for li in lis:
            name_1 = li.xpath('./div[2]/span/a/text()').get('')
            name_2 = li.xpath('./div[2]/span/i[@style="float: left;"]/a/text()').get('')
            detail_href = li.xpath('./div[2]/span/a/@href').get()
            if detail_href is None:
                # Fix: original concatenated None here and crashed with TypeError.
                continue
            detail_url = 'https://china.guidechem.com' + detail_href
            brand_name = name_1 + name_2
            company = li.xpath('./div[3]/div[2]/em[1]/a/text()').get()
            if company:
                href = f"https://china.guidechem.com/common/getAuditedSupplier.jsp?nameid={company}"
                type_dict = copy.deepcopy(meta)
                type_dict['brand_name'] = brand_name
                type_dict['company'] = company
                type_dict['detail_url'] = detail_url

                # Deduplication key: md5 of name + company + detail URL.
                type_dict['pmid'] = get_md5(f"{brand_name}{company}{detail_url}")
                if int(select_item(item=type_dict)[0]) == 0:
                    self.parse_list_1(href, type_dict)

        # Pagination intentionally disabled: page URLs are fed from Redis
        # (see turn_page) instead of following "下一页" links.

    def parse_list_1(self, url, meta):
        """Parse the supplier-info page; save directly when there is no real
        detail page, otherwise continue to parse_detail.

        Raises:
            Exception: When the page cannot be fetched (URL is appended to
                error_save_parse_list_1.txt first).
        """
        response_1 = self.requests_start(url)
        if not response_1:
            with open('error_save_parse_list_1.txt', 'a', encoding='utf-8') as f:
                f.write(f'{str(url)},\n')
                raise Exception('无法访问！！！')
        response = Selector(text=response_1)
        businessCreditCode = ''
        businessScope = ''
        for ul in response.xpath('//div[@class="itl_nav_er_pk"]/ul'):
            for li_ in ul.xpath('./li'):
                # Fix: .get() may return None; the original `in` test then
                # raised TypeError.
                type_text = li_.xpath('./span/text()').get() or ''
                if '统一社会信用代码' in type_text:
                    businessCreditCode = li_.xpath('./em/text()').get()
                if '经营范围' in type_text:
                    businessScope = li_.xpath('./em/text()').get()
        meta['businessCreditCode'] = businessCreditCode
        meta['businessScope'] = businessScope
        if 'javascript:mklink' in meta["detail_url"]:
            # No real detail page behind the link: save with empty detail fields.
            now = datetime.now()
            save_item = {
                'brand_name': meta['brand_name'],            # product name
                'company': meta['company'],
                'businessCreditCode': meta['businessCreditCode'],  # unified social credit code
                'businessScope': meta['businessScope'],      # business scope
                'productDetail': '',
                'companyIntroduction': '',
                'pmid': meta['pmid'],
                'agathertime': now.strftime('%Y-%m-%d %H:%M:%S'),
            }
            insert_data_to_mysql(save_item)
        else:
            type_dict = copy.deepcopy(meta)
            self.parse_detail(type_dict["detail_url"], type_dict)

    def parse_detail(self, url, meta):
        """Parse a product detail page and insert the final record.

        Raises:
            Exception: When the page cannot be fetched (URL is appended to
                error_save_parse_detail.txt first).
        """
        response_2 = self.requests_start(url)
        if not response_2:
            with open('error_save_parse_detail.txt', 'a', encoding='utf-8') as f:
                f.write(f'{str(url)},\n')
                raise Exception('无法访问！！！')
        if '您要访问的页面不存在...' not in str(response_2):
            response = Selector(text=response_2)
            productDetail_list = []
            for li in response.xpath('//div[@class="in_cen_main_nt4"]/dl/dd'):
                k_text = ''.join(li.xpath('./span//text()').getall()).strip()
                v_text = ''.join(li.xpath('./em//text()').getall()).strip()
                productDetail_list.append(k_text + v_text)
            productDetail = '|||'.join(productDetail_list)

            intro_html = response.xpath('//div[@class="in_cen_main_nt2"]').get()
            # Fix: .get() may return None, which BeautifulSoup cannot parse.
            companyIntroduction = get_tag_content(soup=intro_html) if intro_html else ''
        else:
            # "Page not found" placeholder: store empty detail fields.
            productDetail = ''
            companyIntroduction = ''
        now = datetime.now()
        save_item = {
            'brand_name': meta['brand_name'],            # product name
            'company': meta['company'],
            'businessCreditCode': meta['businessCreditCode'],  # unified social credit code
            'businessScope': meta['businessScope'],      # business scope
            'productDetail': productDetail,
            'companyIntroduction': companyIntroduction,
            'pmid': meta['pmid'],
            'agathertime': now.strftime('%Y-%m-%d %H:%M:%S'),
        }
        insert_data_to_mysql(save_item)



def turn_page():
    """Drain page URLs from the Redis set and crawl each one.

    Pops members from 'chinaguidechem_product_bak:pages' until the set is
    empty; when a crawl fails, the popped member is pushed back so it can
    be retried on a later run.
    """
    spider = ChinaguidechemProductSpider()
    while True:
        data = conn.spop('chinaguidechem_product_bak:pages')
        if not data:
            break
        item = json.loads(data)
        try:
            url = item.get('url')
            spider.start_requests([url])
            # Random pause between pages to reduce the request rate.
            time.sleep(random.uniform(5, 10))
        except Exception:
            # Fix: original bound the exception but logged only a fixed
            # message; log the full traceback, then requeue the member.
            logger.exception('请求错误')
            conn.sadd('chinaguidechem_product_bak:pages', data)
            conn.sadd('chinaguidechem_product_bak:pages', data)


if __name__ == "__main__":
    # Script entry point: consume page URLs from Redis until the set is empty.
    turn_page()
