import datetime
import re
from collections.abc import Iterable
from urllib.parse import urljoin

import scrapy
import torch
from gensim.models import Word2Vec
from jsonpath import jsonpath
from scrapy import Request
from scrapy.http import HtmlResponse
from torch.nn.utils.rnn import pad_sequence

from weibo_spyder.defclass import LSTM
from weibo_spyder.items import MBlogItem
from weibo_spyder.utils import processing

class WeiboSpider(scrapy.Spider):
    """Crawl m.weibo.cn's realtime hot-search list and the posts under each entry.

    Items are pushed to Elasticsearch via MBlogToESPipeline; each post's
    sentiment is scored with a pre-trained LSTM + word2vec model pair.
    """
    name = 'm_weibo'
    allowed_domains = ['m.weibo.cn']
    # Entry point: the "realtime hot search" container API.
    start_urls = [
        'https://m.weibo.cn/api/container/getIndex?containerid=106003type%3D25%26t%3D3%26disable_hot%3D1%26filter_type%3Drealtimehot&title=%E5%BE%AE%E5%8D%9A%E7%83%AD%E6%90%9C&show_cache_when_error=1&extparam=seat%3D1%26lcate%3D1001%26filter_type%3Drealtimehot%26dgr%3D0%26c_type%3D30%26mi_cid%3D100103%26region_relas_conf%3D0%26cate%3D10103%26pos%3D0_0%26display_time%3D1675314208%26pre_seqid%3D1168968431&luicode=10000011&lfid=231583']
    custom_settings = {
        'ELASTICSEARCH_INDEX': 'mblog',
        'ELASTICSEARCH_TYPE': 'MBlogItem',
        'ELASTICSEARCH_UNIQ_KEY': 'id',
        'DOWNLOAD_DELAY': 0.5,
        'RANDOMIZE_DOWNLOAD_DELAY': True,
        'ITEM_PIPELINES': {
            'weibo_spyder.pipelines.MBlogToESPipeline': 400,
        },
        'MAX_SEARCH_PAGE': 1
    }
    # Run inference on GPU when available, otherwise fall back to CPU.
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    # map_location ensures a checkpoint saved on GPU still loads on a
    # CPU-only host (without it torch.load raises on such machines).
    net = torch.load("./model/lstm_5.model", map_location=device)
    word2vec = Word2Vec.load('./model/word2vec.model')

    def transtotimestamp(self, v_str):
        """Convert a Weibo GMT-style time string to epoch milliseconds.

        The offset is parsed with %z, yielding a timezone-aware datetime so
        .timestamp() is correct on any host. (The old format matched the
        literal text '+0800', producing a *naive* datetime interpreted in
        the host's local timezone — wrong everywhere outside UTC+8.)

        :param v_str: e.g. 'Thu Feb 02 10:30:00 +0800 2023'
        :return: int, milliseconds since the Unix epoch
        """
        GMT_FORMAT = '%a %b %d %H:%M:%S %z %Y'
        parsed = datetime.datetime.strptime(v_str, GMT_FORMAT)
        # round() then int() — the old int(float(round(...))) was redundant.
        return int(round(parsed.timestamp() * 1000))

    def transtostring(self, v_str):
        """Convert a Weibo GMT-style time string to 'YYYY-MM-DD'.

        Uses %z instead of a hard-coded literal '+0800' so any UTC offset
        parses, matching transtotimestamp; output is identical for the
        usual '+0800' inputs.

        :param v_str: e.g. 'Thu Feb 02 10:30:00 +0800 2023'
        :return: date string 'YYYY-MM-DD'
        """
        GMT_FORMAT = '%a %b %d %H:%M:%S %z %Y'
        parsed = datetime.datetime.strptime(v_str, GMT_FORMAT)
        return parsed.strftime("%Y-%m-%d")

    def clean(self, text):
        """Strip markup, URLs and boilerplate from a Weibo post body.

        Removes HTML tags, URLs, {%...%} placeholders (geo tags, topic
        widgets), @mentions, 【...】 editorial brackets, zero-width spaces
        and a few fixed junk tokens, then trims surrounding whitespace.

        :param text: raw 'text' field of an mblog card (may contain HTML)
        :return: cleaned plain-text string
        """
        # Drop HTML tags first so the later patterns see plain text.
        text = re.sub(r'<[^>]+>', '', text)
        # The inline (?i) flag already makes the pattern case-insensitive;
        # the extra re.IGNORECASE the old code also passed was redundant.
        URL_REGEX = re.compile(
            r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))')
        text = URL_REGEX.sub("", text)
        # Raw strings below fix the invalid '\{' escape in the original
        # non-raw patterns (a SyntaxWarning on modern Python).
        text = re.sub(r"\{%.+?%\}", " ", text)  # {%xxx%}: geo tags, topic widgets
        text = re.sub(r"@.+?( |$)", " ", text)  # @username mentions
        text = re.sub(r"【.+?】", " ", text)      # 【...】 editorial inserts
        text = re.sub("\u200b", " ", text)      # zero-width space artifacts
        # Fixed junk tokens (the original name "usefulness" was misleading —
        # these are tokens to discard, including "转发微博" = "repost").
        for junk in ("(", ")", "转发微博", "（", "）"):
            text = text.replace(junk, "")
        return text.strip()

    def parse_topics(self, text):
        """Return the first #topic# in `text` without the '#' marks, or ''.

        Weibo topics are written inline as #topic#; posts may carry several,
        but only the first is kept (re.findall already returns a list, so
        the original extra list() wrapper was redundant).
        """
        topics = re.findall(r"#[^#]+#", text)
        return topics[0].strip('#') if topics else ""

    def collate_fn(self, data):
        """Pad a batch of (sequence, label) pairs for RNN consumption.

        Sorts the batch in place by sequence length, longest first — the
        ordering pack_padded_sequence requires — then zero-pads all
        sequences to a common length.

        :param data: list of (sequence_tensor, label) pairs
        :return: (padded batch tensor, float32 label tensor, list of true lengths)
        """
        # In-place sort, longest sequence first (pack_padded_sequence contract).
        data.sort(key=lambda pair: len(pair[0]), reverse=True)
        lengths = [len(seq) for seq, _ in data]
        sequences = [seq for seq, _ in data]
        labels = [label for _, label in data]
        # Zero-pad the variable-length sequences into one (batch, max_len, ...) tensor.
        padded = pad_sequence(sequences, batch_first=True, padding_value=0)
        return padded, torch.tensor(labels, dtype=torch.float32), lengths

    def sentimentAnalysis(self, text):
        """
        Score the sentiment of one post body with the LSTM model.

        Returns -1 (negative) when the model output is <= 0.25,
        1 (positive) when >= 0.75, else 0 (neutral).
        NOTE(review): assumes self.net yields a single score in [0, 1]
        per input — confirm against the LSTM definition in defclass.
        """
        strs, data = [text], []
        for s in strs:
            vectors = []
            # processing() presumably returns a space-separated token string
            # (verify in weibo_spyder.utils); words missing from the
            # word2vec vocabulary are silently dropped.
            for w in processing(s).split(" "):
                if w in self.word2vec.wv.key_to_index:
                    vectors.append(self.word2vec.wv[w])  # replace each word with its embedding
            vectors = torch.Tensor(vectors)
            data.append(vectors)
        # Labels are dummies (-1): collate_fn only needs them positionally.
        x, _, lengths = self.collate_fn(list(zip(data, [-1] * len(strs))))

        with torch.no_grad():
            x = x.to(self.device)
            outputs = self.net(x, lengths)  # forward pass
            outputs = outputs.view(-1)  # flatten the output
        res = 0
        # Single-element tensor comparisons: truthiness is well-defined here.
        if outputs <= 0.25:
            res = -1
        if outputs >= 0.75:
            res = 1
        return res

    def parse(self, response: HtmlResponse, **kwargs):
        """Turn each hot-search entry into paged search-API requests."""
        cards = response.json()['data']['cards']
        # Every hot-search entry carries a 'scheme' deep-link whose query
        # string identifies its topic container.
        scheme_urls = jsonpath(cards, '$..scheme')
        # Search API base endpoint.
        api_base = 'https://m.weibo.cn/api/container/getIndex'
        # Upper bound on result pages fetched per topic.
        max_pages = self.settings.get('MAX_SEARCH_PAGE')
        for scheme in scheme_urls:
            scheme = str(scheme)
            query = scheme[scheme.index('?'):]
            for page_no in range(1, max_pages + 1):
                page_query = query + '&page_type=searchall' + f'&page={page_no}'
                yield Request(
                    url=urljoin(api_base, page_query),
                    callback=self.parse_Blog,
                    dont_filter=True,
                )

    def parse_Blog(self, response: HtmlResponse):
        """Parse one search-result page into MBlogItems.

        Only cards with card_type == 9 (text / picture / video posts)
        carry an 'mblog' payload.
        """
        cards = response.json()['data']['cards']
        card_list = jsonpath(cards, '$..*[?(@.card_type==9)]')
        # jsonpath returns False when nothing matches; a plain truthiness
        # test covers that as well as an empty list, without relying on
        # `from collections import Iterable` (removed in Python 3.10).
        if not card_list:
            return
        for card in card_list:
            mblog = card['mblog']
            item = MBlogItem()
            # Post body, cleaned of markup / URLs / boilerplate.
            text = self.clean(mblog['text'])
            item['topic'] = self.parse_topics(text)
            item['text'] = text
            item['sentiment'] = self.sentimentAnalysis(text)
            # Publication time: epoch millis + human-readable date.
            created_at = str(mblog['created_at'])
            item['created_time'] = self.transtotimestamp(created_at)
            item['created_time_text'] = self.transtostring(created_at)
            # Author and post id.
            item['author'] = mblog['user']['screen_name']
            item['mid'] = mblog['id']
            # Engagement counters.
            item['comments_count'] = mblog['comments_count']
            item['reposts_count'] = mblog['reposts_count']
            item['attitudes_count'] = mblog['attitudes_count']
            # Posting client.
            item['source'] = mblog['source']
            # Location as 'country-province-city', using whichever parts exist.
            location = ''
            item['province'] = ''
            if 'status_country' in mblog:
                location += mblog['status_country'] + '-'
            if 'status_province' in mblog:
                item['province'] = mblog['status_province']
                location += mblog['status_province'] + '-'
            if 'status_city' in mblog:
                location += mblog['status_city']
            item['location'] = location
            # Deep-link back to the post.
            item['link'] = card['scheme']

            yield item
