# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import sqlite3
import jieba.posseg as psg
import jieba
class FinalworkPipeline:
    """Scrapy item pipeline for news articles.

    For each scraped item it extracts the most-mentioned person name and the
    top keywords from the article body, attaches them to the item, and
    persists the item into a local SQLite database (``news.db``, table
    ``news_info``).
    """

    def process_item(self, item, spider):
        """Enrich *item* with 'name' and 'keywords', save it, and return it.

        Expects the item to already carry 'url', 'title' and 'content'.
        """
        content = item['content']
        item['name'] = self.key_figure(content)
        item['keywords'] = self.word_extract(content)
        self.saveToSqlite(item)
        return item

    def saveToSqlite(self, info):
        """Insert one item (url, title, content, name, keywords) into news_info.

        Uses a parameterized query — the previous %-formatted SQL was both an
        injection risk and broke on any value containing a single quote.
        Assumes the ``news_info`` table already exists with 5 columns.
        """
        con = sqlite3.connect("news.db")
        try:
            con.execute(
                "insert into news_info values (?, ?, ?, ?, ?)",
                (info['url'], info['title'], info['content'],
                 info['name'], info['keywords']),
            )
            con.commit()
        finally:
            # Always release the connection, even if the insert fails.
            con.close()

    # Extract the key person of the article
    def key_figure(self, content):
        """Return the most frequently mentioned person name in *content*.

        Uses jieba part-of-speech tagging and counts tokens flagged 'nr'
        (person name). Returns '' when no person name is found.
        """
        jieba.load_userdict('./userdic.txt')
        counts = {}
        for token in psg.cut(content):
            if token.flag == 'nr':
                counts[token.word] = counts.get(token.word, 0) + 1
        if not counts:
            return ''
        # max() returns the first name reaching the highest count, matching
        # the original stable-sort-then-take-first behavior.
        return max(counts, key=counts.get)

    # Extract keywords
    def word_extract(self, content):
        """Return the top-5 highest-frequency non-stop-word tokens of
        *content*, joined by commas.

        Stop words are loaded from ./stopword.txt (one per line, UTF-8).
        """
        # Load stop words into a set for O(1) membership tests.
        with open('./stopword.txt', encoding='utf8') as f:
            stop_words = {line.strip() for line in f}

        # Tokenize and drop stop words.
        split_words = [w for w in jieba.cut(content) if w not in stop_words]

        # Count frequencies and keep the 5 most common words.
        word_num = 5
        counts = {}
        for word in split_words:
            counts[word] = counts.get(word, 0) + 1
        freq_word = sorted(counts.items(), key=lambda kv: kv[1],
                           reverse=True)[:word_num]
        return ','.join(word for word, _ in freq_word)

