import scrapy
import re
from bs4 import BeautifulSoup
from datetime import datetime

from taoguba.items import TaogubaJsonItem


class YuJinXiangSpider(scrapy.Spider):
    """Scrape comments posted by selected users in a tgb.cn topic thread.

    Usage:
        scrapy crawl yujinxiang -a url=2fyTCAkgewH -a start_page=1 -a end_page=65

    Known topic ids / page counts:
        -a url=2eCOuOqAjhO -a end_page=150
        -a url=2fyTCAkgewH -a end_page=65
        -a url=2fMfvSXDZ5W -a end_page=16
    """
    name = 'yujinxiang'
    custom_settings = {
        'DOWNLOAD_DELAY': 2,        # be polite: one request every 2 seconds
        'CONCURRENT_REQUESTS': 1
    }

    # CSS class -> display name of the posters whose comments we keep.
    POSTERS = {'user_10016726': '郁金香', 'user_10272915': '小郁导'}

    # Weekday labels indexed by datetime.weekday() (0 == Monday).
    WEEKDAYS = ['周一', '周二', '周三', '周四', '周五', '周六', '周日']

    def __init__(self, url, start_page=1, end_page=10, *args, **kwargs):
        """Store the topic id and the inclusive page range to crawl.

        :param url: topic identifier, e.g. ``2fyTCAkgewH``
        :param start_page: first page to fetch (inclusive)
        :param end_page: last page to fetch (inclusive)
        """
        # BUGFIX: positional *args were accepted but silently dropped.
        super().__init__(*args, **kwargs)
        self.url = url
        self.start_page = int(start_page)
        self.end_page = int(end_page)
        self.id = 1  # running sequence number assigned to scraped items

    def start_requests(self):
        """Yield one GET request per page of the configured topic."""
        for page in range(self.start_page, self.end_page + 1):
            # BUGFIX: the original used scrapy.FormRequest with no formdata,
            # which issues a POST; a paginated page fetch whose parameters are
            # already in the query string should be a plain GET Request.
            yield scrapy.Request(
                url=f'https://www.tgb.cn/a/{self.url}-{page}?type=Z',
                callback=self.parse
            )

    def parse(self, response):
        """Extract comments from one thread page and yield TaogubaJsonItem.

        Matches <div class="comment-data user_xxx"> elements whose user id is
        in ``POSTERS``; optional quoted-reply metadata is filled in when the
        corresponding markup is present.
        """
        soup = BeautifulSoup(response.body, 'html.parser')
        # Regex matched by bs4 against the full class attribute string,
        # e.g. "comment-data user_10016726".
        pattern = re.compile(
            r'^comment-data (' + '|'.join(map(re.escape, self.POSTERS)) + r')$'
        )

        for comment in soup.find_all('div', class_=pattern):
            # Recover the user id from the class list (skip "comment-data").
            user_id = next((cls for cls in comment['class'] if cls in self.POSTERS), None)
            if not user_id:
                continue

            item = TaogubaJsonItem()
            item['id'] = self.id
            self.id += 1
            item['post_date'] = comment.find('span', attrs={'class': 'pcyclspan'}).get_text().strip()
            # NOTE(review): assumes the span text is ISO-formatted
            # (datetime.fromisoformat) — confirm against the live page markup.
            item['post_weekday'] = self.WEEKDAYS[datetime.fromisoformat(item['post_date']).weekday()]
            item['poster_name'] = self.POSTERS[user_id]
            item['post_text'] = comment.find('div', attrs={'class': 'comment-data-text'}).get_text().strip()
            item['quoted_url'] = ''
            item['quoted_name'] = ''
            item['quoted_text'] = ''

            quote = comment.find('div', attrs={'class': 'comment-data-quote'})
            if quote:
                # ROBUSTNESS: each find() below may return None on malformed
                # markup; guard instead of crashing the whole page parse.
                name_ele = quote.find('a', attrs={'class': 'fs15'})
                if name_ele:
                    item['quoted_name'] = name_ele.get_text().strip()

                span_ele = quote.find('span', attrs={'class': 'data-quote-zk'})
                if span_ele:
                    a_ele = span_ele.find('a', attrs={'class': 'c666'})
                    if a_ele and a_ele.get('onclick'):
                        # onclick looks like fn(x, topicID, replyID); the
                        # trailing ')' is stripped from the last argument.
                        parts = a_ele.get('onclick').split(',')
                        item['quoted_url'] = f'https://www.tgb.cn/topic/getReplyContent?topicID={parts[1]}&replyID={parts[2][:-1]}'
                else:
                    text_ele = comment.find('p', attrs={'class': 'data-quote-text'})
                    if text_ele:
                        item['quoted_text'] = text_ele.get_text().strip()

            yield item
