from email import header
from time import sleep
from typing import final
import scrapy
import json
from scrapy.selector import Selector
from scrapy.shell import inspect_response
import isbnlib
import math
import re
import collections
import os
import jieba


# from zmq import proxy
from comments.utils import extract_chinese, outputInfo
import requests

from dotenv import dotenv_values

# Load environment variables from the project-root .env file.
envs = dotenv_values('../.env')

# Base URL of the proxy-pool service queried by CommentsSpider.get_ip().
# NOTE: raises KeyError at import time if PROXY_URL is missing from .env.
proxyURL = envs['PROXY_URL']

def getCommentsUrl(id, page):
    """Build the mobile Dangdang review-list URL for product *id* at *page*."""
    base = 'http://product.m.dangdang.com/review.php'
    query = (
        f'pid={id}&main_pid=0&product_medium=0&sort_type=2'
        f'&action=get_review_html_by_page&page={page}'
        '&label_id=0&filter_type=0&focusCurProduct=&first_in=0'
    )
    return f'{base}?{query}'


class CommentsSpider(scrapy.Spider):
    """Crawl Dangdang product reviews.

    For each product id in ``jdIds`` the spider pages through the mobile
    review endpoint, yields one item per comment, and — once every product's
    pages are exhausted — yields a single word-frequency summary item built
    with jieba over all comment texts.
    """

    name = 'comments'

    # Comma-separated Dangdang product ids to crawl.
    jdIds = '27882546,25310842'

    # ISBN attached to the final word-frequency item.
    isbn = '123'

    # product id -> number of comments scraped so far
    record = {}

    count = 0

    # Number of products whose comment pages are fully consumed.
    completeCount = 0

    # Current 'http://ip:port' proxy string, refreshed via get_ip().
    proxy = ''

    def get_ip(self):
        """Fetch a fresh proxy from the proxy pool and store it on ``self.proxy``.

        On any parse/validation failure the previous proxy is kept and a
        'fail' message is logged; nothing is raised to the caller.
        """
        response = requests.get(proxyURL)
        try:
            payload = json.loads(response.text)
            # The pool signals success with code == 0; anything else is an error.
            if int(payload['code']) != 0:
                raise ValueError('proxy pool returned non-zero code')
            first = payload['data'][0]
            self.proxy = 'http://' + first['ip'] + ':' + str(first['port'])
            outputInfo('proxy', self.proxy, self)
        except (ValueError, KeyError, IndexError, TypeError):
            # Narrowed from a bare `except:` — covers bad JSON, missing keys,
            # an empty data list, and a non-numeric code.
            outputInfo('fail', 'parse proxy ip error', self)

    def start_requests(self):
        """Load stop words, then issue the page-0 request for every product."""
        self.get_ip()

        # Stop words filter the word cloud built in parseCommentList.
        with open('baidu_stopwords.txt', encoding='utf-8') as f:
            self.stop_words = {line.replace('\n', '') for line in f}
        # Accumulates every kept token across all products.
        self.result_list = []

        product_ids = self.jdIds.split(',')
        # Emit the total-product estimate once.
        outputInfo('totalCount', len(product_ids), self)

        for product_id in product_ids:
            self.record[product_id] = 0
            yield scrapy.Request(
                getCommentsUrl(product_id, 0),
                callback=self.parseCommentList,
                errback=self.errBack,
                meta={
                    "page": 0,
                    "proxy": self.proxy,
                    "id": product_id,
                },
            )

    def errBack(self, failure):
        """Retry a failed comment-page request with a freshly fetched proxy.

        BUG FIX: the original first called ``inspect_response(response, self)``
        with an undefined name ``response``, raising NameError before any
        retry could be scheduled.
        """
        product_id = failure.request.meta['id']
        page = int(failure.request.meta['page'])
        self.get_ip()
        yield scrapy.Request(
            getCommentsUrl(product_id, page),
            callback=self.parseCommentList,
            meta={
                "page": page,
                "proxy": self.proxy,
                "id": product_id,
            },
            dont_filter=True,
        )

    def parseCommentList(self, response):
        """Parse one page of comments.

        Yields one dict per comment, tokenises the comment text for the word
        cloud, then either follows the next page or — when every product is
        complete — yields the top-100 word-frequency summary item.
        """
        try:
            product_id = response.meta['id']
            page = int(response.meta['page'])
        except (KeyError, ValueError, TypeError):
            # Meta was lost or malformed (e.g. a proxy error page). The
            # original retried here but referenced unbound names (`page`
            # raised NameError); without an id no retry URL can be built,
            # so refresh the proxy and drop this response.
            self.get_ip()
            outputInfo('fail', 'missing request meta, dropping response', self)
            return

        contents = response.selector.xpath(
            '//p[@class="review_text j_review_text "]/text()').getall()
        dates = response.selector.css('span.date::text').getall()
        stars = response.selector.css('.star_num::text').getall()
        cids = response.selector.css('.comment_item::attr(comment_id)').getall()
        self.record[product_id] += len(contents)

        for cid, text, date, star in zip(cids, contents, dates, stars):
            yield {
                "id": cid,
                "content": text,
                "time": date,
                # Star text ends with a unit char (e.g. '10分'); drop it and
                # convert the 0-10 scale to 0-5.
                "score": int(star[:-1]) / 2,
                "productId": product_id,
            }
            # Keep only Chinese characters, then tokenise with jieba.
            chinese_only = " ".join(re.findall(r'[\u4e00-\u9fa5]+', text, re.S))
            for word in jieba.cut(chinese_only, cut_all=True):
                # Drop stop words and single-character tokens.
                if word not in self.stop_words and len(word) > 1:
                    self.result_list.append(word)

        outputInfo('pageCount', {
            "id": product_id,
            "page": page,
            "count": len(contents),
            "notBook": 0,
        }, self)

        if not contents:
            # An empty page means this product's comments are exhausted.
            self.completeCount += 1
            outputInfo('progress', self.completeCount, self)
            if self.completeCount == len(self.jdIds.split(',')):
                # Every product finished: emit the word-frequency summary.
                outputInfo('success', 'complete', self)
                word_counts = collections.Counter(self.result_list)
                word_counts_top100 = [
                    {'x': word, 'count': freq}
                    for word, freq in word_counts.most_common(100)
                ]
                outputInfo('word', word_counts_top100, self)
                yield {
                    'type': 'word',
                    'content': word_counts_top100,
                    'isbn': self.isbn,
                }
        else:
            yield scrapy.Request(
                getCommentsUrl(product_id, page + 1),
                callback=self.parseCommentList,
                meta={
                    "page": page + 1,
                    "proxy": self.proxy,
                    "id": product_id,
                },
            )

    def closed(self, reason):
        """Scrapy shutdown hook: log why the spider closed."""
        outputInfo('closed', reason, self)
