# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface

import pandas as pd
import time
import re

from taoguba.items import TaogubaJsonItem, TaogubaExcleItem
from taoguba.spiders.StructPipe import StructpipeSpider


class JsonPipeline(object):
    """Accumulate ``TaogubaJsonItem`` rows produced by the 'yujinxiang'
    spider and export them, sorted by post id, to ``aa.json`` when the
    spider closes.
    """

    # Spider this pipeline is active for, and the file it produces.
    SPIDER_NAME = 'yujinxiang'
    OUTPUT_FILE = 'aa.json'

    # Column order of the exported JSON records.
    columns = [
        'id', 'poster_name', 'post_date', 'post_weekday', 'post_text',
        'quoted_url', 'quoted_name', 'quoted_text'
    ]

    def open_spider(self, spider):
        """Prepare the row buffer (only for the target spider)."""
        if spider.name == self.SPIDER_NAME:
            # Buffer plain dicts and build the DataFrame once at close time;
            # pd.concat per item is quadratic in the number of items.
            self._rows = []

    def process_item(self, item, spider):
        """Buffer JSON items; pass every item through unchanged."""
        # hasattr guard: open_spider only creates the buffer for the target
        # spider, so items seen while another spider runs must not raise
        # AttributeError here.
        if isinstance(item, TaogubaJsonItem) and hasattr(self, '_rows'):
            self._rows.append(dict(item))
        return item

    def close_spider(self, spider):
        """Build the DataFrame, sort it by id, and export it as JSON."""
        if spider.name != self.SPIDER_NAME:
            return
        df = pd.DataFrame(self._rows, columns=self.columns)
        # Parse dates; unparseable values become NaT instead of raising.
        df['post_date'] = pd.to_datetime(df['post_date'], errors='coerce')
        df = df.sort_values(by='id', na_position='last')
        df = df[self.columns]
        df.to_json(self.OUTPUT_FILE,
                   orient='records',
                   date_format='iso',
                   force_ascii=False,
                   indent=2)
class MarkdownPipeline(object):
    """Patch the ``quoted_text`` column of the records stored in ``aa.json``
    with freshly crawled values, strip HTML anchors and bracketed emoji
    markers out of the text columns, and render everything to a
    timestamped Markdown file.
    """

    def open_spider(self, spider):
        """Prepare the row buffer (only for the StructPipe spider)."""
        if spider.name == StructpipeSpider.name:
            # Buffer plain dicts and build the DataFrame once at close time;
            # pd.concat per item is quadratic in the number of items.
            self._rows = []

    def process_item(self, item, spider):
        """Buffer Excel items; pass every item through unchanged."""
        # hasattr guard: the buffer only exists for the StructPipe spider,
        # so items seen while another spider runs must not raise
        # AttributeError here.
        if isinstance(item, TaogubaExcleItem) and hasattr(self, '_rows'):
            self._rows.append(dict(item))
        return item

    def close_spider(self, spider):
        """Merge the buffered quotes into aa.json, clean the text columns
        and render the final Markdown document.
        """
        if spider.name != StructpipeSpider.name:
            return
        df = pd.read_json('aa.json', orient='records')
        df.set_index('id', inplace=True)
        # Overwrite quoted_text for the ids re-crawled in this run;
        # update() aligns on the id index and skips NaN patch values.
        patch = pd.DataFrame(self._rows, columns=['id', 'quoted_text'])
        df.update(patch.set_index('id')['quoted_text'])
        df = df.reset_index()
        for col in ('post_text', 'quoted_text'):
            df[col] = df[col].apply(self.clean_a_tags).apply(self.dynamic_remove)
        df["post_date"] = pd.to_datetime(df["post_date"]).dt.strftime("%Y-%m-%d %H:%M")
        markdown_output = "\n".join(df.apply(self.generate_markdown, axis=1))
        t = int(time.time())
        # One output file per run, named by the Unix timestamp.
        with open(f'{t}.md', 'w', encoding='utf-8') as f:
            f.write(markdown_output)

    def dynamic_remove(self, text, max_chinese=9):
        """Remove short bracketed runs of Chinese text (e.g. emoji markers
        such as ``[微笑]`` or fullwidth ``［微笑］``) that contain no Latin
        letters and at most ``max_chinese`` Chinese-led segments.

        Non-string values (NaN cells from rows without a quote) are
        returned untouched instead of raising TypeError in re.sub.
        """
        if not isinstance(text, str):
            return text
        regex = fr'''
            (［(?![^］]*[a-zA-Z])(?:[\u4e00-\u9fff][^］]*?){{0,{max_chinese}}}］)
            |(\[(?![^]]*[a-zA-Z])(?:[\u4e00-\u9fff][^]]*?){{0,{max_chinese}}}\])
        '''
        return re.sub(regex, '', text, flags=re.VERBOSE)

    def clean_a_tags(self, text):
        """Replace every ``<a ...>...</a>`` tag with its inner text.

        Non-string values (NaN cells) are returned untouched instead of
        raising TypeError in re.sub.
        """
        if not isinstance(text, str):
            return text
        pattern = r'<a\b[^>]*>(.*?)</a>'
        return re.sub(pattern, r'\1', text, flags=re.IGNORECASE | re.DOTALL)

    def generate_markdown(self, row):
        """Render one record as a Markdown paragraph: a '#### date - weekday'
        header, an optional quote block, then the poster's own text.
        """
        time_header = f"#### {row['post_date']} - {row['post_weekday']}\n\n"

        quote_block = ""
        # Quote only when both the quoted author and text are present.
        # pd.notna on quoted_text matters: float NaN is truthy, so without
        # it a missing quote would render as the literal string 'nan'.
        if (pd.notna(row['quoted_name']) and pd.notna(row['quoted_text'])
                and row['quoted_text']):
            quote_block = f"> **{row['quoted_name']}**：{row['quoted_text']}\n\n"

        main_content = f"**{row['poster_name']}**：{row['post_text']}"

        return f"{time_header}{quote_block}{main_content}\n\n"