import re
import json
import time
import requests
from bs4 import BeautifulSoup
from loguru import logger
from config import *
import random
from mysql_util import MySQLConnectionPool
from urllib.parse import urlparse
from pathlib import Path
from kafka import KafkaProducer
import hashlib
import os
from redis_queue import RedisQueueCluster
from minio_client import MinioClient
# Configure loguru file sink: INFO level, rotate files at 500 MB, keep 7 days of logs.
logger.add(sink="log.log", level="INFO", rotation="500 MB", retention="7 days")


class BasePolicySpider:
    """Base crawler for government policy web sites.

    Provides proxied HTTP fetching with retry, attachment download,
    MinIO upload, Kafka publishing, and Redis-set-based de-duplication
    (MD5 of the item dict's values). Subclasses must implement
    ``parse_detail_page`` and ``extract_list_from_page``.
    """

    def __init__(self, base_url, list_page_pattern, detail_base_url=None):
        """Initialize HTTP defaults and external service clients.

        Args:
            base_url: Site root used by subclasses to resolve relative links.
            list_page_pattern: Format string for list-page URLs with ``{t}``
                (category) and ``{p}`` (page suffix) placeholders.
            detail_base_url: Optional root for detail pages; defaults to
                ``base_url``.
        """
        # Browser-like headers; User-Agent is rotated per request in
        # _session_get() via get_header() (imported from config).
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Pragma": "no-cache",
            "Referer": "",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36",
            "sec-ch-ua": '"Google Chrome";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"'
        }
        self.cookies = {}
        # NOTE(review): endpoint/access_key/secret_key/bucket_name/base_path
        # come from the star import of config — confirm they are defined there.
        self.MinioClient = MinioClient(
            endpoint=endpoint,
            access_key=access_key,
            secret_key=secret_key,
            bucket_name=bucket_name,
            base_path=base_path,
            secure=False
        )
        self.producer = KafkaProducer(
            bootstrap_servers=KAFKA_IP_PORT,
            api_version=(0, 11),
            value_serializer=lambda v: json.dumps(v).encode()
        )
        self.base_url = base_url
        self.list_page_pattern = list_page_pattern
        self.redis = RedisQueueCluster(redis_conf_clustor, 'policy_data')
        self.detail_base_url = detail_base_url or base_url
        self.get_categories = [""]
        self.db = MySQLConnectionPool()
        # Lower-case extensions recognized as downloadable attachments.
        # NOTE: multi-part suffixes like '.tar.gz' never match because
        # get_url_houzhui() returns only the final suffix ('.gz').
        self.attachment_extensions = {
            # documents
            '.pdf', '.doc', '.docx', '.xls', '.xlsx', '.ppt', '.pptx',
            '.txt', '.log', '.csv', '.tsv', '.md', '.rtf', '.odt', '.ods', '.odp',
            # images
            '.jpg', '.jpeg', '.png', '.gif', '.bmp', '.svg', '.webp', '.tiff', '.tif',
            # archives
            '.zip', '.rar', '.7z', '.tar', '.gz', '.tgz', '.bz2', '.jar', '.xpi', '.war',
            # programs / installers
            '.exe', '.msi', '.bat', '.sh', '.dmg', '.pkg', '.deb', '.rpm',
            # audio / video
            '.mp3', '.wav', '.aac', '.flac', '.ogg', '.wma',
            '.mp4', '.mkv', '.avi', '.mov', '.wmv', '.flv', '.mpeg', '.mpg',
            # e-books
            '.epub', '.mobi', '.azw', '.azw3', '.djvu', '.chm',
            # database / data files
            '.sqlite', '.db', '.sql', '.json', '.xml', '.yaml', '.yml',
            # engineering / design files
            '.aar', '.apk', '.tar.gz', '.tar.bz2',
            '.psd', '.ai', '.sketch', '.fig', '.dwg', '.dxf', '.stp', '.igs',
            '.wps'
        }

    def get_one(self):
        """Fetch one proxy from the paid proxy API, looping until success.

        Returns:
            A requests-style proxies dict ``{'http': ..., 'https': ...}``,
            or the sentinel string '该套餐已失效' when the proxy plan expired.
        """
        api_url = "http://need1.dmdaili.com:7771/dmgetip.asp?apikey=4eeae3b6&pwd=47f2bfede26e6ae56bea1c47a8c79d64&getnum=50&httptype=1&geshi=2&fenge=1&fengefu=&operate=all"
        while True:
            try:
                response = requests.get(api_url)
                msg = response.text
                if '该套餐已失效' in msg:
                    print('该套餐已失效')
                    return '该套餐已失效'
                if '访问频率太快' in msg:
                    print('访问频率太快')
                    time.sleep(1)
                    continue
                if 'API提取频率' in msg:
                    print('API提取频率')
                    time.sleep(1)
                    continue
                data = response.json()['data']
                # Use the first proxy of the batch for both schemes.
                first = f"{str(data[0]['ip']).strip()}:{str(data[0]['port']).strip()}"
                return {'http': f'http://{first}', 'https': f'http://{first}'}
            except Exception as e:
                print('代理请求出错！！', e)
                time.sleep(2)

    def _session_get(self, url, retry=10000, timeout=15, requests_type=''):
        """GET *url* through a fresh proxy, retrying on any failure.

        Args:
            url: Target URL.
            retry: Maximum number of attempts.
            timeout: Per-request timeout in seconds.
            requests_type: 'download' enables the early-abort extension check.

        Returns:
            The Response on HTTP 200, the sentinel string '404 Not Found'
            on 404/5xx, or None when retries are exhausted (or, for
            downloads, when the URL suffix is not a known attachment type).
        """
        for attempt in range(retry):
            try:
                # After 3 failed download attempts, give up on URLs whose
                # suffix is not a recognized attachment extension.
                if (requests_type == 'download' and attempt == 3
                        and self.get_url_houzhui(url) not in self.attachment_extensions):
                    return None
                self.headers['User-Agent'] = get_header()  # rotate UA each try
                proxies = self.get_one()
                response = requests.get(url=url, headers=self.headers, timeout=timeout,
                                        proxies=proxies, cookies=self.cookies)
                response.encoding = 'utf-8'
                status = response.status_code
                if status == 404 or str(status).startswith('5'):
                    return '404 Not Found'
                if status != 200:
                    continue
                return response
            except Exception as e:
                print('请求出错', e)
        return None  # retries exhausted

    def get_detail(self, url):
        """Fetch a detail page and return its BeautifulSoup tree, or None."""
        response = self._session_get(url)
        if not response or response == '404 Not Found':
            return None
        soup = BeautifulSoup(response.text, 'html.parser')
        # Replace <br> tags with newlines so extracted text keeps line breaks.
        for br in soup.find_all('br'):
            br.replace_with('\n')
        return soup

    @staticmethod
    def sanitize_path(path):
        """Make *path* safe for use as a filesystem path.

        Replaces illegal characters with underscores, collapses repeated
        slashes, strips leading/trailing slashes and removes '“'.
        """
        sanitized = re.sub(r'[<>:\\|?*\x00-\x1F]', '_', path)
        sanitized = re.sub(r'[/\\]+', '/', sanitized)
        return sanitized.strip('/').replace('“', '')

    def dict_values_md5(self, data):
        """Return the MD5 hex digest of all dict values concatenated in order.

        Used as the de-duplication key stored in Redis.
        """
        concatenated = ''.join(str(value) for value in data.values())
        return hashlib.md5(concatenated.encode('utf-8')).hexdigest()

    def put_kafka(self, item):
        """Publish *item* (JSON-serializable dict) to the Kafka topic."""
        self.producer.send(KAFKA_TOPIC_NAME_NEW, item)
        print('kafka存储成功！！！', item)

    def to_json(self, my_dict, file_name='beijing.json'):
        """Append *my_dict* as one JSON line to *file_name* (JSONL format)."""
        json_str = json.dumps(my_dict, ensure_ascii=False)
        with open(file_name, 'a+', encoding='utf-8', errors='ignore') as f:
            f.write(json_str + '\n')
        print('获取成功', my_dict)

    def download_file(self, url, file_name):
        """Download *url* to *file_name*, appending the URL's extension if missing.

        Returns:
            The final file path on success, None on failure.
        """
        try:
            file_extension = os.path.splitext(url)[1]
            if file_extension not in file_name:
                file_name = file_name + file_extension
            response = self._session_get(url, requests_type='download')
            # BUGFIX: _session_get may return None OR the '404 Not Found'
            # sentinel string; the original only checked ==None and then
            # crashed on str.content (swallowed by the broad except).
            if response is None or isinstance(response, str):
                return None
            with open(file_name, 'wb') as f:
                f.write(response.content)
            print('下载成功', file_name)
            return file_name
        except Exception as e:
            print(e)
            return None

    def extract_common_fields(self, content):
        """Extract common policy metadata fields from a detail-page tree.

        Subclasses may override this to adapt to different page structures.
        Each field is located by its Chinese label text; the value is taken
        from the label's next element.
        """
        publish_date_tag = content.find(string=re.compile(r'发布(日期|时间)'))
        theme_tag = content.find(string=re.compile('主题分类'))
        unit_tag = content.find(string=re.compile('(制发单位|发文机构|发文单位)'))
        impl_date_tag = content.find(string=re.compile('实施(日期|时间)'))
        write_date_tag = content.find(string=re.compile('成文(日期|时间)'))
        file_num_tag = content.find(string=re.compile(r'发文(字号|序号)'))
        expire_date_tag = content.find(string=re.compile('(失效(日期|时间)|废止日期)'))
        validity_tag = content.find(string=re.compile('有效性'))

        return {
            'publish_date': publish_date_tag.find_next().text.strip() if publish_date_tag else '',
            'theme': theme_tag.find_next().text.strip() if theme_tag else '',
            'unit': unit_tag.find_next().text.strip() if unit_tag else '',
            'impl_date': impl_date_tag.find_next().text.strip() if impl_date_tag else '',
            'write_date': write_date_tag.find_next().text.strip() if write_date_tag else '',
            'file_num': file_num_tag.find_next().text.strip() if file_num_tag else '',
            'expire_date': expire_date_tag.find_next().text.strip() if expire_date_tag else '',
            'validity': validity_tag.find_next().text.strip() if validity_tag else '',
            'content': str(content)
        }

    def parse_detail_page(self, detail_url):
        """Parse a detail page. Must be implemented by subclasses."""
        raise NotImplementedError("子类必须实现解析详情页的方法")

    def download_to_json(self, dic, fujian_list, file_name, json_name):
        """Download attachments, upload them to MinIO and publish *dic* to Kafka.

        Skips the whole item if its MD5 is already in the Redis set
        (already crawled); otherwise records the MD5 after publishing.
        """
        md5 = self.dict_values_md5(dic)
        if self.redis.sis_member(md5):
            return  # already crawled
        # Local directory for this item's attachments (title capped at 50 chars).
        cleaned_title = self.sanitize_path(dic['title'])[:50]
        base_path = os.path.join(file_name, cleaned_title)
        os.makedirs(base_path, exist_ok=True)
        # Attachment file names derived from the link texts (capped at 50 chars).
        attach_path = [os.path.join(base_path, a.text.strip('\\').strip()[:50]) for a in fujian_list]

        obj_name_list = []
        for u, a in zip(dic['attach_url'], attach_path):
            file_path = self.download_file(u, a)
            # BUGFIX: download_file returns None on failure; the original
            # crashed here on None.replace(...).
            if not file_path:
                continue
            # Store the file in MinIO under a path relative to the work dir.
            obj_name = file_path.replace('D:\\work\\code\\政策', '').replace('\\', '/')
            obj_name = self.sanitize_path(obj_name)
            self.MinioClient.upload_file(object_name=obj_name,
                                         file_path=file_path.replace('\\', '/'))
            obj_name_list.append(self.MinioClient.wrap_obj_name(obj_name))
        dic['attach_path'] = attach_path
        dic['md5'] = md5
        dic['minio_path'] = obj_name_list
        dic['crawl_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        # Publish the enriched item to Kafka, then mark it as seen.
        self.put_kafka(dic)
        self.redis.sadd(md5)
        logger.info(f'md5 : {md5}')

    def run(self, crawl_page, platform, city, startpage=0, get_categories=None):
        """Crawl list pages [startpage, crawl_page) for every category.

        Stops a category early when a list page returns '404 Not Found'.
        """
        self.platform = platform
        self.city = city
        if get_categories:
            self.get_categories = get_categories
        for t in self.get_categories:
            for page in range(startpage, crawl_page):
                time.sleep(random.uniform(0, 0.5))  # jitter between requests
                logger.info("正在抓取-------》city：%s platform ：%s  模块：%s 第 %s 页" % (city, platform, t, page))
                if self.get_list(page, t) == '404 Not Found':
                    break

    def get_url_houzhui(self, url):
        """Return the lower-cased file extension of *url*'s path, e.g. '.pdf'.

        GENERALIZED: lower-cased so uppercase suffixes like '.PDF' match
        the all-lowercase ``attachment_extensions`` set.
        """
        return Path(urlparse(url).path).suffix.lower()

    def get_list(self, p, t):
        """Fetch list page *p* of category *t* and delegate parsing.

        Page 0 uses an empty page suffix; later pages use '_<p>'.
        Returns '404 Not Found' on that sentinel, None on fetch failure,
        otherwise whatever extract_list_from_page returns.
        """
        page_suffix = '' if p == 0 else f"_{p}"
        url = self.list_page_pattern.format(t=t, p=page_suffix)
        response = self._session_get(url)
        if response == '404 Not Found':
            return '404 Not Found'
        # BUGFIX: _session_get returns None when retries are exhausted;
        # the original crashed on None.text here.
        if response is None:
            return None
        soup = BeautifulSoup(response.text, 'html.parser')
        return self.extract_list_from_page(soup, t, url)

    def extract_list_from_page(self, soup, t, url):
        """Extract detail links from a list page. Must be implemented by subclasses."""
        raise NotImplementedError("子类必须实现从列表页提取链接的方法")

