# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import os
import time
import imageio
import jieba
import matplotlib.image as mpimg
import wordcloud
# useful for handling different item types with a single interface
from matplotlib import pyplot as plt


# Pipeline that writes scraped items into a MySQL database (disabled / commented out below)
# class DbPipeline:
#
#     #从爬虫获取到的数据，写入数据库
#     @classmethod
#     def from_crawler(cls, crawler:Crawler):
#         settings = crawler.settings
#         host = settings['DB_HOST']
#         port = settings['DB_PORT']
#         username = settings['DB_USER']
#         password = settings['DB_PASS']
#         database = settings['DB_NAME']
#         return cls(host, port, username, password, database)#返回一个对象
#
#     def __init__(self,host,port,username,password,database):
#         self.conn=pymysql.connect(
#             host=host,
#             port=port,
#             user=username,
#             password=password,
#             database=database,
#             charset='utf8'#字符集
#         )
#         self.cursor=self.conn.cursor()
#         self.data=[]
#         self._counter=1
#
#     def close_spider(self,spider):
#         if len(self.data)>0:
#             self._write_to_Db()
#         self.conn.close()
#
#
#
#     # 将获得的数据放入数据库
#     def process_item(self, item, spider):
#         title = item.get('title')
#         rank = item.get('rank')
#         subject = item.get('subject')
#         duration = item.get('duration')
#         intro=item.get('intro')
#         self.data.append((self._counter,title,rank,subject,duration,intro))
#         self._counter+=1
#         if len(self.data)==100: #当有100条数据写入数据库,提高写入的整体速度
#             self._write_to_Db()
#         return item
#
#     # 定义入库函数,减少代码的重复
#     def _write_to_Db(self):
#         self.cursor.executemany(
#             'INSERT INTO `movies` (`id`,`title`, `rank`, `subject`,`duration`,`intro`) VALUES (%s,%s, %s, %s,%s,%s)',
#             self.data
#         )
#         self.conn.commit()
#         self.data.clear()
# Pipeline that writes scraped comments to a .txt file and renders a word cloud when the spider closes
class TxtPipeline:
    """Collect comments into a text file and, when the spider closes,
    segment them with jieba and render/display a word-cloud image.
    """

    # Movie name, read from the environment once at import time; it is
    # baked into the method defaults below and names every per-movie file.
    movie_name = os.getenv('MOVIE_NAME')

    # Base directory holding all data files (comments, stop words, mask, output).
    _BASE_DIR = 'T:/python实验/大作业/final2/tutorial'

    def __init__(self, movie_name=movie_name):
        # Open (and truncate) the comment file for this crawl.
        # 'w' mode creates the file if missing and clears any previous run's
        # contents; utf-8 avoids encoding errors with Chinese comments.
        self._comment_path = f'{self._BASE_DIR}/{movie_name}.txt'
        self.txtfile = open(self._comment_path, 'w', encoding='utf-8')

    def open_spider(self, spider):
        pass

    # Append each scraped comment to the comment file.
    def process_item(self, item, spider, movie_name=movie_name):
        # BUG FIX: the original re-opened a *relative* '{movie_name}.txt'
        # here while __init__/close_spider used the absolute path, so the
        # comments written were never the ones read back for the word cloud.
        # Write through the handle opened in __init__ instead.
        # (movie_name parameter kept for backward compatibility.)
        comment = item.get('comment')
        if comment:  # guard: items may carry a missing/empty comment
            self.txtfile.write(comment)
        # A Scrapy pipeline must return the item so later pipelines see it.
        return item

    # On spider close: read back all comments, build and show a word cloud.
    def close_spider(self, spider, movie_name=movie_name):
        # Flush and close the comment file before reading it back (the
        # original leaked this handle and relied on a separate open()).
        self.txtfile.close()

        with open(self._comment_path, 'r', encoding='utf-8') as f:
            text = f.read()

        # Tokenize the Chinese text; WordCloud expects space-separated words.
        segmented = ' '.join(jieba.lcut(text))

        # Load the stop-word list, one word per line.
        with open(f'{self._BASE_DIR}/stop_words.txt', 'r', encoding='utf-8') as f:
            stopwords = {line.strip() for line in f}

        # Mask image: the cloud is drawn only inside its non-white region.
        mask_array = imageio.imread(f'{self._BASE_DIR}/mask.png')

        w = wordcloud.WordCloud(
            width=500, height=500,
            background_color="#F9F5E5",
            font_path="font.ttf",  # a CJK-capable font is required for Chinese text
            max_words=2000,
            mask=mask_array,
            min_font_size=10,
            max_font_size=100,
            scale=1,
            colormap="Blues",  # overall blue tone
            stopwords=stopwords,  # drop filler words for a cleaner cloud
        )
        # Feed in the space-separated tokens and export the image.
        w.generate(segmented)
        out_path = f'{self._BASE_DIR}/{movie_name}.png'
        # to_file() is synchronous — no sleep is needed before reading back
        # (the original slept 3 s here for no reason).
        w.to_file(out_path)

        self._show_image(out_path)

    @staticmethod
    def _show_image(path):
        """Display the rendered word cloud, scaled to fit a 10x8 window."""
        img = mpimg.imread(path)

        # Image dimensions: shape is (height, width[, channels]).
        img_width, img_height = img.shape[1], img.shape[0]

        plt.figure(figsize=(10, 8))  # display window: 10 wide, 8 tall

        window_aspect_ratio = 10 / 8
        img_aspect_ratio = img_width / img_height

        if img_aspect_ratio > window_aspect_ratio:
            # Image is wider than the window: fit to the window height.
            new_height = 8
            new_width = 8 * img_width / img_height
        else:
            # Image is taller (or equal): fit to the window width.
            new_width = 10
            new_height = 10 * img_height / img_width

        # Show the image stretched to the computed extent.
        plt.imshow(img, extent=[0, new_width, 0, new_height])
        plt.show()
