# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import ast
import datetime
import json
import os
import re
import sqlite3
import time

import pandas as pd
import requests
from bs4 import BeautifulSoup
from openpyxl.drawing.image import Image
from PIL import Image as PILImage

from .items import sweiboItem

current_time = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")


def parse_time(date):
    """Normalize a Weibo timestamp string to an absolute 'YYYY-MM-DD HH:MM'.

    Handles the relative forms '刚刚', 'N分钟前', 'N小时前', 'N秒(前)',
    '昨天 HH:MM' and '今天 HH:MM', plus the abbreviated dates 'MM-DD',
    'MM月DD日 ...' and 'YYYY年MM月DD日 ...'.

    Args:
        date (str): raw time text scraped from the page.

    Returns:
        str: the normalized timestamp; unrecognized input is returned
        unchanged.
    """
    fmt = '%Y-%m-%d %H:%M'
    now = time.time()
    if re.match('刚刚', date):
        return time.strftime(fmt, time.localtime(now))
    # Each branch matches once and returns immediately; the original matched
    # every pattern twice and kept re-testing the already-rewritten string.
    m = re.match(r'(\d+)分钟前', date)
    if m:
        return time.strftime(fmt, time.localtime(now - float(m.group(1)) * 60))
    m = re.match(r'(\d+)小时前', date)
    if m:
        return time.strftime(fmt, time.localtime(now - float(m.group(1)) * 60 * 60))
    m = re.match(r'(\d+)秒前?', date)
    if m:
        return time.strftime(fmt, time.localtime(now - float(m.group(1))))
    m = re.match('昨天(.*)', date)
    if m:
        day = time.strftime('%Y-%m-%d', time.localtime(now - 24 * 60 * 60))
        return day + ' ' + m.group(1).strip()
    m = re.match('今天(.*)', date)
    if m:
        return time.strftime('%Y-%m-%d', time.localtime(now)) + ' ' + m.group(1).strip()
    if re.match(r'\d{2}-\d{2}', date):
        # 'MM-DD' is assumed to be the current year, at midnight.
        return time.strftime('%Y-', time.localtime()) + date + ' 00:00'
    m = re.match(r'(\d{2})月(\d{2})日(.*)', date)
    if m:
        return (time.strftime('%Y-', time.localtime()) + m.group(1).strip()
                + '-' + m.group(2).strip() + m.group(3))
    m = re.match(r'(\d+)年(\d{2})月(\d{2})日(.*)', date)
    if m:
        return f'{m.group(1).strip()}-{m.group(2).strip()}-{m.group(3).strip()}' + m.group(4)
    return date


class SweiboPipeline:
    """Default no-op pipeline: hands every item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to transform here; the real work happens in the other
        # pipelines below.
        return item


class SaveToJsonPipeline(object):
    """Write each scraped item as one JSON object per line (JSON Lines)."""

    def open_spider(self, spider):
        # One output file per run, stamped with the module-load time.
        self.file = open(f'result_{current_time}.json', 'w', encoding='utf-8')

    def close_spider(self, spider):
        self.file.close()

    def process_item(self, item, spider):
        # ensure_ascii=False keeps Chinese text readable in the output file.
        serialized = json.dumps(dict(item), ensure_ascii=False)
        self.file.write(serialized + "\n")
        return item


# class SaveToExcelPipeline:
#     def __init__(self):
#         self.df = pd.DataFrame()
#
#     def process_item(self, item, spider):
#         new_item_dict = dict(item)
#         self.df = self.df.append(new_item_dict, ignore_index=True)
#         return item
#
#     def close_spider(self, spider):
#         self.df.to_excel(filename, index=False, header=True)

class SaveToExcelPipeline:
    """Collect items in memory and merge them into a per-module Excel workbook.

    Sheets are keyed by the item's ``module``; rows already present in the
    previous workbook are deduplicated on (author, content), keeping the
    freshly scraped copy.
    """

    def __init__(self):
        self.data = []
        self.result_path = 'spider_result'
        self.filename = f"{self.result_path}/output.xlsx"

    def open_spider(self, spider):
        self.data = []
        # Make sure the output directory exists before any download/write.
        if not os.path.exists(self.result_path):
            os.makedirs(self.result_path)

    def close_spider(self, spider):
        """Merge this run's rows with the existing workbook and rewrite it."""
        df = pd.DataFrame(self.data)
        check_item = ['author', 'content']
        try:
            # Load the previous run's workbook so old rows are merged, not lost.
            origin_data = pd.read_excel(self.filename, sheet_name=None, engine='openpyxl')
        except Exception:
            origin_data = {}
        writer = pd.ExcelWriter(self.filename, engine='openpyxl')
        for module in df['module'].unique():
            module_data = df[df['module'] == module]
            if module in origin_data:
                merged_data = pd.concat([origin_data[module], module_data], ignore_index=True)
            else:
                merged_data = module_data
            # keep='last' prefers the newly scraped copy of a duplicate row.
            deduplicated_df = merged_data.drop_duplicates(subset=check_item, keep='last')
            deduplicated_df.to_excel(writer, sheet_name=module, index=False)
            # 2024/1/24: image embedding still needs work (floating images,
            # should be anchored inside cells) — left disabled.
            # self.insert_images(writer, module, deduplicated_df)
        writer.close()

    def process_item(self, item, spider):
        """Normalize the item, download its media, and buffer it for export."""
        if isinstance(item, sweiboItem):
            if item.get('public_time'):
                item['public_time'] = parse_time(item['public_time'].strip())
            if item.get('picture'):
                item['picture'] = self.download_images(item['picture'], item['module'], item['name'], item['author'])
        self.data.append(dict(item))
        # BUG FIX: the original returned None here, which drops the item from
        # every pipeline that runs after this one.
        return item

    def download_images(self, image_urls, module, name, author):
        """Download each media URL into spider_result/<module>/ and return the local paths.

        Already-downloaded files are not re-fetched; failed downloads are
        skipped so one bad URL cannot crash the pipeline.
        """
        image_path_list = []
        module_path = os.path.join(self.result_path, module)
        if not os.path.exists(module_path):
            os.makedirs(module_path)
        for index, image_url in enumerate(image_urls):
            # Weibo video URLs contain 'video'; everything else is saved as JPEG.
            extension = 'mp4' if 'video' in image_url else 'jpg'
            image_path = os.path.join(module_path, f'image_{name}_{author}_{index}.{extension}')
            if not os.path.exists(image_path):
                # Only hit the network for files we do not have yet (the
                # original fetched unconditionally, even for cached files).
                try:
                    response = requests.get(image_url, timeout=30)
                    response.raise_for_status()
                except requests.RequestException as err:
                    print(f"Error downloading {image_url}: {err}")
                    continue
                with open(image_path, 'wb') as f:
                    f.write(response.content)
            image_path_list.append(image_path)
        return image_path_list

    def insert_images(self, writer, module, df):
        """Anchor downloaded images next to their rows in the sheet.

        NOTE(review): currently disabled in close_spider — images float over
        the sheet instead of embedding in cells; row anchoring fixed below but
        still needs verification against a real workbook.
        """
        sheet = writer.sheets[module]
        for position, (_, row) in enumerate(df.iterrows()):
            try:
                pictures = row['picture']
                if not isinstance(pictures, list):
                    # Stored as the string repr of a list; parse it safely.
                    # literal_eval instead of eval: no code execution.
                    pictures = ast.literal_eval(pictures)
                if pictures:
                    for i, image_path in enumerate(pictures):
                        # Resolve relative to the working directory instead of
                        # the original hard-coded D:\projects\... path.
                        image = Image(os.path.abspath(image_path))
                        # Excel rows are 1-based and row 1 is the header, so
                        # the data row for position p is p + 2 (the original
                        # used the raw df index, mis-aligning every image).
                        cell = sheet.cell(row=position + 2, column=df.columns.get_loc('picture') + 1 + i)
                        image.width = 25
                        image.height = 25
                        sheet.add_image(image, cell.coordinate)
            except Exception as e:
                print(e)
                continue

    def create_thumbnail(self, image_path):
        """Create (once) and return a <=100x100 thumbnail next to the image."""
        thumbnail_path = image_path.replace('.jpg', '_thumbnail.jpg')
        if not os.path.exists(thumbnail_path):
            image = PILImage.open(image_path)
            image.thumbnail((100, 100))  # resize in place, preserving aspect ratio
            image.save(thumbnail_path)
        return thumbnail_path


class SaveToSQLitePipeline:
    """Upsert scraped items into spider_result/output.db (table sweibo_data)."""

    # Map spider module slugs to the human-readable Chinese category names
    # stored in the database. Built once instead of per item.
    MODULE_NAMES = {'realtimehot': "实时热点", 'socialevent': "社会新闻",
                    'entrank': "娱乐", 'sport': "体育", 'game': "游戏",
                    'school': "学校"}

    def __init__(self):
        # Database file path; the directory is created on first run.
        self.db_path = 'spider_result'
        self.db_filename = f"{self.db_path}/output.db"
        if not os.path.exists(self.db_path):
            os.makedirs(self.db_path)
        self.conn = sqlite3.connect(self.db_filename)
        self.cursor = self.conn.cursor()
        # Create the table if missing; UNIQUE(author, content) backs the
        # ON CONFLICT upsert used in process_item.
        self.cursor.execute('''
            CREATE TABLE IF NOT EXISTS sweibo_data (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                name TEXT,
                module TEXT,
                hot TEXT,
                author TEXT,
                public_time TEXT,
                content TEXT,
                picture TEXT,
                UNIQUE(author, content)  -- 确保 author 和 content 的组合是唯一的
            )
        ''')

    def open_spider(self, spider):
        pass  # connection already established in __init__

    def close_spider(self, spider):
        # Flush any pending transaction before releasing the connection.
        self.conn.commit()
        self.conn.close()

    def process_item(self, item, spider):
        """Upsert the item into SQLite; always pass the item downstream."""
        if isinstance(item, sweiboItem):
            if item.get('public_time'):
                # Normalize relative timestamps ('5分钟前', '昨天 …') first.
                item['public_time'] = parse_time(item['public_time'].strip())
            # Pictures are stored as the str() of the URL list.
            picture_urls = item.get('picture')

            try:
                # Upsert keyed on UNIQUE(author, content): a duplicate row has
                # its public_time and hot refreshed instead of being inserted
                # again. (The original comment claimed INSERT OR IGNORE, but
                # this is an ON CONFLICT ... DO UPDATE.)
                sql = '''
                    INSERT INTO sweibo_data (name, module, hot, author, public_time, content, picture)
                    VALUES (?,?,?,?,?,?,?)
                    ON CONFLICT(author, content) DO UPDATE SET
                    public_time = excluded.public_time, hot=excluded.hot
                '''
                self.cursor.execute(sql, (
                    item.get('name', ''),
                    self.MODULE_NAMES.get(item.get('module', ''), ''),
                    item.get("hot", "0"),
                    item.get('author', ''),
                    item.get('public_time', ''),
                    item.get('content', ''),
                    str(picture_urls)
                ))
                if self.cursor.rowcount == 1:
                    # Row written (inserted or updated) — commit it.
                    self.conn.commit()
                    spider.crawler.stats.inc_value('valid_item_count')
                else:
                    # NOTE(review): with DO UPDATE the statement reports
                    # rowcount 1 even on a conflict, so this branch may
                    # rarely fire — kept for parity with the original stats.
                    spider.crawler.stats.inc_value('ignored_item_count')
            except sqlite3.Error as e:
                print(f"Error inserting data into database: {e}")
        # BUG FIX: the original returned None on conflict, on error, and for
        # non-sweiboItem items, dropping them from any later pipeline.
        return item