# -*- coding: utf-8 -*-
# @Time    : 2025/07/28 08:24
# @Author  : Mr.su
# @FileName: methods.py
# @FileDesc:
from urllib.parse import urljoin
import redis, hashlib, json, requests, re, pymysql
from dataclasses import dataclass
from typing import List, Dict
from lxml import etree
from CrawlCenter.settings import MYSQL_CONFIG


def create_md5(data):
    """Return the hex MD5 digest of *data*.

    Accepts ``str`` (encoded as UTF-8 before hashing) or ``bytes``.
    """
    if isinstance(data, str):
        data = data.encode("utf-8")
    return hashlib.md5(data).hexdigest()


# noinspection PyPep8Naming,PyAttributeOutsideInit
class RedisClient(object):
    """Lazily-connected Redis client over a connection pool (localhost, db 5)."""

    def __init__(self):
        # decode_responses=True makes reads come back as str instead of bytes
        self.pool = redis.ConnectionPool(
            host='127.0.0.1',
            port='6379',
            db=5,
            password='',
            decode_responses=True,
        )

    @property
    def conn(self):
        """Shared Redis connection, created on first access."""
        if not hasattr(self, '_conn'):
            self.getConnection()
        return self._conn

    def getConnection(self):
        self._conn = redis.Redis(connection_pool=self.pool)


def clean_integer(data):
    """Normalize a human-readable count into an ``int``.

    Handles plain ints, digit strings, thousands separators ("1,234") and
    Chinese/letter multiplier suffixes: w/W/万 = 10_000, 亿 = 100_000_000
    (e.g. "1.5万" -> 15000).

    Fixes over the previous version:
    - ``int`` input used to crash on ``data.strip()`` before the (dead)
      ``isinstance(data, int)`` check; it is now returned as-is.
    - a numeric string with no multiplier (e.g. "1.5") used to raise
      ``KeyError('')``; it is now truncated to ``int``.
    """
    units = {'w': 10000, 'W': 10000, '万': 10000, '亿': 100000000}
    if isinstance(data, int):
        return data
    data = data.strip().replace(',', '')
    if data.isdigit():
        return int(data)
    # collect whichever multiplier suffix appears in the string (if any)
    unit = ''.join(u for u in units if u in data)
    if unit in units:
        return int(float(data.replace(unit, '')) * units[unit])
    # no recognised multiplier: plain numeric string such as "1.5"
    return int(float(data))


@dataclass
class ColumnRule:
    # One per-field extraction rule parsed out of the flat form data.
    field: str        # item field name this rule fills (e.g. 'images')
    checked: bool     # whether the field is enabled ('on' in the form)
    rule_type: int    # extraction engine selector (0/1/2)
    value: str        # the rule expression itself


class RuleProcessor:
    """Turns flat form-style keys such as ``column_rule[0][title][checked]``
    into structured :class:`ColumnRule` objects and a JSON payload."""

    def __init__(self, raw_data: Dict):
        self.raw_data = raw_data
        self.rules: List[ColumnRule] = []

    def parse_rules(self) -> List[ColumnRule]:
        """Parse the raw form data into an ordered list of ColumnRule objects."""
        for key, value in self.raw_data.items():
            if not key.startswith('column_rule'):
                continue

            segments = key.split('[')
            if len(segments) < 4:
                continue

            idx = int(segments[1].rstrip(']'))
            field_name = segments[2].rstrip(']')
            attr = segments[3].rstrip(']')

            # grow the list with placeholders until idx is addressable
            self.rules.extend([None] * (idx + 1 - len(self.rules)))
            if self.rules[idx] is None:
                self.rules[idx] = ColumnRule(
                    field=field_name,
                    checked=False,
                    rule_type=0,
                    value='',
                )

            current = self.rules[idx]
            if attr == 'checked':
                current.checked = (value == 'on')
            elif attr == 'rule_type':
                # NOTE(review): an empty form value leaves rule_type as '' (a str),
                # not an int — downstream code appears to rely on this; kept as-is.
                current.rule_type = int(value) if value else ''
            elif attr == 'value':
                current.value = value

        return [r for r in self.rules if r is not None]

    def to_json(self) -> str:
        """Serialize the parsed rules plus request metadata as indented JSON."""
        serialized = [
            {
                'select': 'on' if r.checked else 'off',
                'field': r.field,
                'rule_type': r.rule_type,
                'value': r.value,
            }
            for r in self.parse_rules()
        ]
        return json.dumps({
            'detail_url': self.raw_data.get('detail_url', ''),
            'website_id': self.raw_data.get('website_id'),
            'node_type': self.raw_data.get('node_type'),
            'column_rule': serialized,
        }, indent=2)


class ProcessJson:
    """Extract values from nested JSON via dotted rules like ``data.[].url``.

    A ``[]`` segment means "iterate the list at this level"; any other
    segment is treated as a dict key. Matches accumulate on the class-level
    ``url`` list, which ``parse_rule`` resets before each run.
    """
    url = []

    @classmethod
    def parse_data(cls, data, rules, rule_count, index):
        if not data:
            return
        segment = rules[index]
        if segment == "[]":
            # list level: fan out over every element with the next segment
            if isinstance(data, list):
                for element in data:
                    cls.parse_data(element, rules, rule_count, index + 1)
        else:
            # dict level: descend by key; collect when this is the last segment
            if isinstance(data, dict):
                data = data[segment]
                if index + 1 == rule_count:
                    cls.url.append(data)
                else:
                    cls.parse_data(data, rules, rule_count, index + 1)

    @classmethod
    def parse_rule(cls, data, rule_str):
        """Reset the accumulator, apply ``rule_str`` to ``data``, return matches."""
        cls.url = []
        segments = rule_str.split(".")
        cls.parse_data(data, segments, len(segments), 0)
        return cls.url


# Default mobile (iPhone Safari) User-Agent for the requests calls in vail_rule.
headers = {
    'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1'
}


def vail_rule(rule):
    """Validate a crawl rule by executing it against the live site.

    - node_type 0: nothing to validate; returns {}.
    - node_type 1: column (list-page) rule; returns {'detail_urls': [...]}.
    - otherwise:   detail-page rule; fetches detail_url and returns the
      extracted item dict.

    rule_type / column_rule_type convention: 0 = XPath, 1 = regex,
    2 = JSON path (handled by ProcessJson).
    """
    if rule['node_type'] == 0:
        return {}
    elif rule['node_type'] == 1:
        url_lis = []
        column_res = requests.get(rule['column_url'], headers=headers)
        if rule['column_rule_type'] == 0:
            html = etree.HTML(column_res.content.decode())
            urls = html.xpath(rule['column_rule'])
        elif rule['column_rule_type'] == 1:
            urls = re.findall(r"""%s""" % rule['column_rule'], column_res.content.decode())
        else:
            urls = ProcessJson.parse_rule(json.loads(column_res.text), rule['column_rule'])
        for url in urls:
            print(url)
            # column_prefix is a template like 'https://x/{str}' wrapping the match
            url = rule['column_prefix'].replace('{str}', url)
            if not url.startswith('http'):
                url = urljoin(rule['column_url'], url)
            url_lis.append(url)
        return {'detail_urls': url_lis}
    else:
        # Normalize the raw form data into the structured rule payload.
        rule = json.loads(RuleProcessor(rule).to_json())
        item = {}
        res = requests.get(rule['detail_url'], headers=headers)
        for config in rule['column_rule']:
            if config['select'] == 'on':
                if config['field'] == 'images':
                    # value may be 'match&split&splice'; split/splice are optional post-processing
                    if '&' not in config['value']:
                        match_rule, split_rule, splice_rule = config['value'], '', ''
                    else:
                        match_rule, split_rule, splice_rule = config['value'].split('&')
                    if config['rule_type'] == 0:
                        html = etree.HTML(res.content.decode())
                        img_lis = html.xpath(match_rule)
                    elif config['rule_type'] == 1:
                        img_lis = re.findall(r"""%s""" % match_rule, res.content.decode())
                    else:
                        img_lis = ProcessJson.parse_rule(json.loads(res.content.decode()), match_rule)
                    item['images'] = img_lis
                    if split_rule:
                        item['images'] = [img.split(split_rule)[0] for img in img_lis]
                    if splice_rule:
                        item['images'] = [splice_rule.replace('{str}', img) for img in img_lis]
                elif config['field'] == 'soure_url':
                    # value embeds a sub-rule in braces, e.g. 'https://x/{...}';
                    # the leading letter apparently selects the engine
                    # (x=xpath, z=regex, j=json) — TODO confirm against rule authors.
                    replace_rule = config['value'].split('{')[1].split('}')[0]
                    _str = '{' + replace_rule + '}'
                    if replace_rule.startswith('x'):
                        html = etree.HTML(res.content.decode())
                        replace_str = ''.join(html.xpath(replace_rule)).strip()
                    elif replace_rule.startswith('z'):  # fix: was misspelled 'satrtswith' (AttributeError)
                        replace_str = ''.join(re.findall(r"""%s""" % replace_rule, res.content.decode())).strip()
                    elif replace_rule.startswith('j'):
                        replace_str = ''.join(ProcessJson.parse_rule(json.loads(res.content.decode()), replace_rule))
                    else:
                        replace_str = _str
                    item[config['field']] = config['value'].replace(_str, replace_str)
                else:
                    if config['rule_type'] == 0:
                        html = etree.HTML(res.content.decode())
                        item[config['field']] = ''.join(html.xpath(config['value']))
                    elif config['rule_type'] == 1:
                        item[config['field']] = ''.join(re.findall(r"""%s""" % config['value'], res.content.decode()))
                    else:
                        # fix: was rule['value'] — the normalized payload has no 'value'
                        # key (KeyError); the per-field rule lives in config['value'],
                        # matching the generated-spider code path.
                        item[config['field']] = ''.join(ProcessJson.parse_rule(json.loads(res.content.decode()), config['value']))
            else:
                item[config['field']] = ''
        return item


class GetWebsiteConfig:
    """Loads a website's crawl configuration (site row plus rule rows) from MySQL."""

    def __init__(self, website_id):
        self.conn = pymysql.connect(**MYSQL_CONFIG)
        self.cur = self.conn.cursor()
        self.website_id = website_id

    def get_website_config(self):
        """Return the ``website_list`` row for this website_id as a column->value dict."""
        # Parameterized query instead of %-string formatting (SQL-injection safe;
        # the driver handles quoting, so the literal quotes are no longer needed).
        self.cur.execute('select * from website_list where website_id=%s', (self.website_id,))
        line_data = self.cur.fetchone()
        columns = [col[0] for col in self.cur.description]
        return dict(zip(columns, line_data))

    def get_node_config(self):
        """Return (column_nodes, detail_nodes) from ``rule_list`` for this website.

        node_type == 2 rows are detail-page rules; everything else is a
        column (list-page) rule and gets a zeroed ``rule_scraped_count``.
        """
        self.cur.execute('select * from rule_list where website_id=%s', (self.website_id,))
        data_lis = self.cur.fetchall()
        columns = [col[0] for col in self.cur.description]
        detail_nodes = []
        column_nodes = []
        for data in data_lis:
            dic = dict(zip(columns, data))
            if dic['node_type'] == 2:
                detail_nodes.append(dic)
            else:
                # Per-rule scrape counter, used to total how much a rule collected.
                dic['rule_scraped_count'] = 0
                column_nodes.append(dic)
        return column_nodes, detail_nodes

    def run(self):
        """Return the full config bundle: site row plus both rule-node lists."""
        website_config = self.get_website_config()
        column_nodes, detail_nodes = self.get_node_config()
        item = {'website_config': website_config, 'column_nodes': column_nodes, 'detail_nodes': detail_nodes}
        return item


def create_spider(website_id, create_dir):
    """Generate a scrapy spider module for *website_id* under *create_dir*.

    The spider source is assembled from three template fragments (class
    header with the DB-loaded config baked in, custom settings, and the
    request/parse callbacks) and written to ``<create_dir><website_id>.py``.

    Template fixes applied here:
    - 'satrtswith' typo corrected to 'startswith' (generated spiders crashed
      with AttributeError on regex-based soure_url rules).
    - images extraction now uses the computed ``match_rule`` instead of the
      raw config['value'], matching vail_rule; otherwise rules using the
      '&'-separated split/splice syntax matched on the wrong string.

    SECURITY NOTE(review): the generated spider eval()s DB-stored
    ``column_rule`` strings — rule_list content must be trusted.
    NOTE(review): the generated process_detail returns inside the
    detail-node loop, so only the first detail node produces an item;
    if several are expected, ``yield item`` is probably intended — confirm.
    """
    spider_name = website_id
    spider_config = GetWebsiteConfig(website_id).run()
    file_path = create_dir + '{}.py'.format(spider_name)
    temeplate_strs = [
        """# -*- coding: utf-8 -*-
# @Author  : Mr.su
# @FileDesc:

import scrapy, json, re, random
from urllib.parse import urljoin
from CrawlCenterSpiders.settings import UA_LIST
from CrawlCenterSpiders.tools.methods import ProcessJson, create_md5


class TemeplateSpider(scrapy.Spider):
    name = '{}'
    config = {}

    if config:
        website_config = config['website_config']
        column_nodes = config['column_nodes']
        detail_nodes = config['detail_nodes']
    else:
        raise SystemError('规则错误')""".format(spider_name, spider_config),
        """
    custom_settings = {'LOG_FILE': './spiders/logs/%s.txt'}
        """ % spider_name,
        """
    def start_requests(self):
        for node in self.column_nodes:
            if node['collect_status'] != 1:
                continue
            if node['node_type'] == 1:
                yield scrapy.Request(
                    node['column_url'], headers=random.choice(UA_LIST), callback=self.process_lis,
                    dont_filter=True, meta={"node": node}
                )

    def process_lis(self, response):
        node = response.meta['node']
        if node['column_rule_type'] == 0:
            detail_urls = response.xpath(node['column_rule']).extract()
        elif node['column_rule_type'] == 1:
            detail_urls = re.findall(r'''%s''' % node['column_rule'], response.text)
        elif node['column_rule_type'] == 2:
            detail_urls = ProcessJson.parse_rule(json.loads(response.text), node['column_rule'])
        else:
            detail_urls = []
        for detail_url in detail_urls:
            url = node['column_prefix'].replace('{str}', detail_url)
            if not url.startswith('http'):
                url = urljoin(node['column_url'], url)
            yield scrapy.Request(
                url, headers=random.choice(UA_LIST), callback=self.process_detail,
                meta={'node': node}
            )

    def process_detail(self, response):
        node = response.meta['node']
        for detail_node in self.detail_nodes:
            item = {}
            for config in eval(detail_node['column_rule']):
                if config['select'] == 'on':
                    if config['field'] == 'images':
                        if '&' not in config['value']:
                            match_rule, split_rule, splice_rule = config['value'], '', ''
                        else:
                            match_rule, split_rule, splice_rule = config['value'].split('&')
                        if config['rule_type'] == 0:
                            img_lis = response.xpath(match_rule).extract()
                        elif config['rule_type'] == 1:
                            img_lis = re.findall(r'''%s''' % match_rule, response.text)
                        else:
                            img_lis = ProcessJson.parse_rule(json.loads(response.text), match_rule)
                        item['images'] = img_lis
                        if split_rule:
                            item['images'] = [img.split(split_rule)[0] for img in img_lis]
                        if splice_rule:
                            item['images'] = [splice_rule.replace('{str}', img) for img in img_lis]
                    elif config['field'] == 'soure_url':
                        replace_rule = config['value'].split('{')[1].split('}')[0]
                        _str = '{' + replace_rule + '}'
                        if replace_rule.startswith('x'):
                            replace_str = ''.join(response.xpath(replace_rule).extract()).strip()
                        elif replace_rule.startswith('z'):
                            replace_str = ''.join(re.findall(r'''%s''' % replace_rule, response.text)).strip()
                        elif replace_rule.startswith('j'):
                            replace_str = ''.join(ProcessJson.parse_rule(json.loads(response.text), replace_rule))
                        else:
                            replace_str = _str
                        item[config['field']] = config['value'].replace(_str, replace_str)
                    else:
                        if config['rule_type'] == 0:
                            item[config['field']] = ''.join(response.xpath(config['value']).extract())
                        elif config['rule_type'] == 1:
                            item[config['field']] = ''.join(re.findall(r'''%s''' % config['value'], response.text))
                        else:
                            item[config['field']] = ''.join(ProcessJson.parse_rule(json.loads(response.text), config['value']))
                else:
                    item[config['field']] = ''
            if not item['brand']:
                item['brand'] = self.website_config['brand']
            item['_id'] = create_md5(item['images'][0])
            item['menu'] = self.website_config['menu']
            item['gender'] = node['gender'] if 'gender' in node else ''
            item['season'] = node['season'] if 'season' in node else ''
            item['City'] = node['city'] if 'city' in node else ''
            item['status'] = 0
            item['detail_url'] = response.url
            return item
"""
    ]
    with open(file_path, 'w', encoding='utf8') as f:
        for template in temeplate_strs:
            f.write(template)