# -*- coding: utf-8 -*-
import os
import re
from copy import deepcopy

import regex
import requests
import scrapy

from .base_spider import BaseSpider
from ..utils.config_util import (
    header_html,
    proxy_dict,
    header_css_svg
)
# bj = Beijing (北京)
from ..utils.mysql_connect_util.bj_shop_comment_util import BJShopCommentMysqlConnectUtil as MysqlConnection

from ..utils.log_util import create_logger
from ..items import CommentItem

logger = create_logger('BJShopCommentSpider', 'bj_shop_comment_spider.log')


class BJShopNewCommentSpider(BaseSpider):
    """
    Crawl the latest reviews of popular Beijing (BJ) shops on dianping.com.
    """
    # Per-request delay (seconds) and whether downloads go through a proxy;
    # presumably consumed by BaseSpider / project middleware — not visible here.
    download_delay = 0.2
    need_proxy = True

    name = 'bj_shop_comment'
    allowed_domains = ['www.dianping.com', 's3plus.meituan.net']

    def __init__(self):
        super().__init__()

        # Review listing pages for a single shop; every template is formatted
        # with the shop id. "review_all" additionally accepts query filters.
        review_all = "http://www.dianping.com/shop/{}/review_all"
        self.base_url_list = [
            "http://www.dianping.com/shop/{}/review_tuangou",      # group-buy reviews
            review_all,                                            # all reviews
            review_all + "?queryType=isPic&queryVal=true",         # reviews with photos
            review_all + "?queryType=reviewGrade&queryVal=good",   # positive reviews
            review_all + "?queryType=reviewGrade&queryVal=middle", # neutral reviews
            review_all + "?queryType=reviewGrade&queryVal=bad",    # negative reviews
        ]

        # --- reviewer / rating extraction ---------------------------------
        self.pattern_lv = regex.compile(r"lv(\d+)\.png")              # level from avatar badge url
        self.pattern_star_level = regex.compile(r"sml-str(\d+) star") # star score css class
        self.pattern_text = regex.compile(r"口味：|环境：|服务：|人均：|元")
        self.pattern_digit = regex.compile(r"[0-9\.]+")
        self.pattern_thumbs_up = regex.compile(r"赞.*?<em.*?>\((\d+)\)</em>.*?回应", re.S)
        # --- css/svg anti-scraping de-obfuscation -------------------------
        self.pattern_sub = regex.compile(r"span.*?}.")
        self.pattern_sub_px = regex.compile(r"px")
        self.pattern_span_key = regex.compile(r"\.([a-z0-9]*?){")
        self.pattern_dot = regex.compile(r"\..*?}")
        self.pattern_span = regex.compile(r"<span.*?></span>", re.S)
        self.pattern_px = regex.compile(r"{.*?([-\d\.]+)px.*?([-\d\.]+)px.*?}")
        # --- review-text clean-up -----------------------------------------
        self.pattern_sub_others = regex.compile(r"<br>|<img.*?>|[ \t\r\n\xa0]+", re.S)
        self.pattern_less_words = regex.compile(r"<div class=\"less-words\">.*?</div>|[\t\n]", re.S)
        self.pattern_div_text = regex.compile(r"<div.*?>(.*?)</?div", re.S)
        # --- replaying failed urls recorded in our own log files ----------
        self.pattern_log = regex.compile(r"shop_id=(.*?), url=(.*?),")

    def start_requests(self):
        """Seed crawl requests.

        Two sources: (1) shops from MySQL with at least 5000 reviews, one
        request per review-page template; (2) (shop_id, url) pairs recovered
        from this spider's own log files (previously failed fetches), which
        are re-queued and the logs truncated so they are not replayed twice.
        """
        shop_ids = []
        with MysqlConnection() as (connection, cursor, select_sql, _):
            cursor.execute(select_sql)
            for (shop_id, comment_number) in cursor.fetchall():
                # Only crawl very popular shops (5000+ reviews).
                if int(comment_number or 0) >= 5000:
                    shop_ids.append(shop_id)
        for shop_id in shop_ids:
            for base_url in self.base_url_list:
                item = CommentItem()
                item["shop_id"] = shop_id
                url = base_url.format(shop_id)
                yield scrapy.Request(url, callback=self.parse, meta={"item": item, "url": url},
                                     headers=header_html, dont_filter=True)

        # Recover previously failed (shop_id, url) pairs from the log files.
        retry_set = set()
        log_path = './log/'
        if os.path.exists(log_path):
            for log_file in os.listdir(log_path):
                if not log_file.startswith("bj"):
                    continue
                file_name = os.path.join(log_path, log_file)
                with open(file_name, 'r', encoding="utf-8") as fr:
                    for line in fr:
                        line = line.strip()
                        # Skip blank lines; the previous readline/break loop
                        # stopped at the first empty line and silently dropped
                        # every entry after it.
                        if not line:
                            continue
                        if "url=" not in line or "shop_id=" not in line:
                            continue
                        match = self.pattern_log.search(line)
                        # Guard: a line containing the keywords but not matching
                        # the full pattern would otherwise raise AttributeError.
                        if match:
                            retry_set.add(match.groups())
                # Truncate the log so the same failures are not replayed again;
                # opening in "w" mode empties the file (no need to delete it first).
                with open(file_name, "w", encoding="utf-8"):
                    pass
        for shop_id, url in retry_set:
            item = CommentItem()
            item["shop_id"] = shop_id
            yield scrapy.Request(url, callback=self.parse, meta={"item": item, "url": url},
                                 headers=header_html, dont_filter=True)

    def parse(self, response):
        """Parse one review-list page and yield a CommentItem per review.

        The page hides review text behind css/svg obfuscation: a stylesheet is
        fetched, mapped class->character, and used to reconstruct each review.
        ``response.meta`` carries a partially filled item and the origin url.
        """
        url = response.meta.pop('url')
        if not self.is_valid_response(response):
            # Log in the "shop_id=..., url=..." format that start_requests()
            # parses to re-queue failed pages on the next run.
            logger.error(
                f"shop_id={response.meta['item']['shop_id']}, url={url}, code={response.status}")
            return
        # "暂无点评" means the shop has no reviews yet — nothing to extract.
        text = response.xpath(
            "//*[@id='review-list']/div[@class='review-list-container']/div[@class='review-list-main']"
            "/div[@class='reviews-wrapper']/div/text()").extract_first() or ""
        if "暂无点评" in text:
            return
        css_info_dict, svg_info_dict = dict(), dict()
        try:
            # gain_url / pattern_css_url / handler_css / search_svg_info /
            # query_svg_html / enter_handler_svg are presumably inherited from
            # BaseSpider — not visible in this file.
            css_url = self.gain_url(self.pattern_css_url, str(response.body, encoding="utf-8"))
            css_html = self.handler_url(css_url, header_css_svg)
            css_info_dict = self.handler_css(css_html)
            svg_info_dict = self.search_svg_info(css_html)
            for (key, svg_html) in self.query_svg_html(svg_info_dict):
                svg_list = self.enter_handler_svg(svg_html)
                # NOTE(review): the double "[key][key]" indexing looks odd —
                # confirm against query_svg_html's return structure.
                svg_info_dict[key][key] = svg_list
        except Exception as e:
            logger.error(f"shop_id={response.meta['item']['shop_id']}, url={url}, error={e}")
            return

        # Without both lookup tables the reviews cannot be de-obfuscated.
        if (not svg_info_dict) or (not css_info_dict):
            return

        li_list = response.xpath("//div[@class='reviews-wrapper']/div[@class='reviews-items']/ul/li")
        error_set = set()
        for li_response in li_list:
            try:
                # Fresh copy per review; the meta item holds shared fields (shop_id).
                item = deepcopy(response.meta.get("item"))

                # Reviewer name lives in an <a> normally, in a <span> otherwise.
                user_name = li_response.xpath(
                    ".//div[@class='main-review']/div[@class='dper-info']/a[@class='name']/text()").extract_first()
                if not user_name:
                    user_name = li_response.xpath(
                        ".//div[@class='main-review']"
                        "/div[@class='dper-info']"
                        "/span[@class='name']/text()").extract_first()
                user_name = user_name.strip()
                item["user_name"] = user_name

                user_level = li_response.xpath(".//div[@class='main-review']/div[@class='dper-info']"
                                               "/img[contains(@class, 'user-rank-rst')]/@src").extract_first()
                if not user_level:  # user has no level badge
                    user_level = 0
                else:
                    user_level = self.handler_user_level(user_level)
                item['user_level'] = user_level

                is_vip = li_response.xpath(".//div[@class='main-review']/div[@class='dper-info']"
                                           "/span/@class").extract_first()
                is_vip = 1 if is_vip == "vip" else 0
                item['is_vip'] = is_vip
                star_level = li_response.xpath(
                    ".//div[@class='main-review']/div[@class='review-rank']/span/@class").extract_first()
                star_level = self.handler_star_level(star_level)
                item['star_level'] = star_level

                # Sub-scores: taste / environment / service / average price.
                span_list = li_response.xpath(
                    ".//div[@class='main-review']/div[@class='review-rank']/span[@class='score']/span[@class='item']")
                for span in span_list:
                    info = span.xpath(".//text()").extract_first()
                    if "口味" in info:
                        item["taste"] = self.handler_text(info)
                    elif "环境" in info:
                        item["environment"] = self.handler_text(info)
                    elif "服务" in info:
                        item["serve"] = self.handler_text(info)
                    elif "人均" in info:
                        item["average_price"] = self.handler_digit(info)
                # Backfill defaults for any sub-score the review did not show.
                for key in ["taste", "environment", "serve", "average_price"]:
                    if key == 'average_price':
                        item.setdefault(key, 0)
                    else:
                        item.setdefault(key, '')

                comment_time = li_response.xpath(
                    ".//div[@class='main-review']/div[@class='misc-info clearfix']"
                    "/span[@class='time']/text()").extract_first()
                item["comment_time"] = comment_time.strip()

                actions = li_response.xpath(
                    ".//div[@class='main-review']"
                    "/div[@class='misc-info clearfix']"
                    "/span[@class='actions']").extract_first()
                thumbs_up = self.handler_thumbs_up(actions)
                item["thumbs_up"] = thumbs_up

                favorite_dishes = li_response.xpath(
                    ".//div[@class='main-review']/div[@class='review-recommend']/a//text()").extract()
                favorite_dishes = ';'.join(favorite_dishes)
                item['favorite_dishes'] = favorite_dishes

                # Dedup key: one review per (shop, user, timestamp).
                item['sku_number'] = self._hash('#'.join([item['shop_id'], user_name, comment_time]))

                # Decode the obfuscated review body with the css/svg tables.
                comment = li_response.xpath(
                    ".//div[@class='main-review']/div[contains(@class, 'review-words')]").extract_first()
                comment = self.handler_review(comment, css_info_dict, svg_info_dict)
                item["comment"] = comment
                yield item
            except Exception as e:
                # Collect into a set so repeated identical failures log once.
                error = self.deal_with_error(e)
                error_set.add(f"shop_id={response.meta['item']['shop_id']}, url={url}, error={str(error)}")

        for error in error_set:
            logger.error(error)

    def handler_review(self, comment, css_info_dict, svg_info_dict):
        """Decode an obfuscated review fragment and return its cleaned text."""
        decoded = self.search_info(comment, css_info_dict, svg_info_dict)
        inner_text = self.pattern_div_text.search(decoded).group(1)
        return self.clean(inner_text)

    def handler_user_level(self, lv_url):
        """Extract the numeric user level from a badge image url
        (e.g. ``.../lv3.png`` -> 3).

        Returns 0 when the url carries no level badge, mirroring the
        fallback of the other ``handler_*`` helpers instead of raising
        AttributeError on a failed match.
        """
        match = self.pattern_lv.search(lv_url)
        return int(match.group(1)) if match else 0

    def handler_star_level(self, star_level_info):
        """Map a rating css class (e.g. ``sml-str40 star``) to a 0-5 star count.

        Some reviewers leave a score without stars; any lookup failure
        (missing/None class string, no match) falls back to 0.
        """
        try:
            raw_score = self.pattern_star_level.search(star_level_info).group(1)
        except Exception as _:
            return 0
        return int(raw_score) // 10

    def handler_digit(self, info):
        """Return the first integer found in *info* after stripping the
        rating labels; 0 when nothing numeric (or a non-integer) is found."""
        stripped = self.handler_text(info)
        try:
            match = self.pattern_digit.search(stripped)
            return int(match.group())
        except Exception as _:
            return 0

    def handler_text(self, info):
        """Strip surrounding whitespace and drop the taste/environment/
        service/price labels from a sub-score string."""
        cleaned = self.pattern_text.sub("", info.strip())
        return cleaned or ''

    def handler_thumbs_up(self, thumbs_up):
        """Parse the like count out of the "actions" HTML snippet; any
        failure (missing snippet, no match) yields 0."""
        try:
            count = self.pattern_thumbs_up.search(thumbs_up).group(1)
            return int(count)
        except Exception as _:
            return 0

    @staticmethod
    def handler_url(url, headers, timeout=30):
        """GET *url* through the configured proxy and return the body text.

        :param url: absolute url of the css/svg resource to fetch.
        :param headers: request headers to send.
        :param timeout: seconds before giving up; without a timeout a stalled
            proxy connection would hang the crawl indefinitely.  Callers wrap
            this in try/except, so a raised Timeout is handled upstream.
        """
        response = requests.get(url, headers=headers, proxies=proxy_dict, timeout=timeout)
        return response.text

    def clean(self, content):
        """Normalise decoded review text: collapse <br>/<img> tags and
        whitespace runs into a Chinese comma, then strip emoji."""
        normalised = self.pattern_sub_others.sub("，", content.strip())
        return self.filter_emoji(normalised)

    def gain_value_dict(self, html, data_list, _dict):
        """Resolve obfuscated css class names to their hidden characters.

        *html* is the downloaded stylesheet, *data_list* holds decoded svg
        rows as ``(y_threshold, row_text)`` pairs, and *_dict* is filled in
        place mapping css class name -> character.
        """
        def _x(data):
            # Convert an ("..px", "..px") offset pair into absolute ints.
            xx, yy = data
            xx, yy = self.pattern_sub_px.sub("", xx), self.pattern_sub_px.sub("", yy)
            return int(abs(float(xx))), int(abs(float(yy)))

        def search(y, _data_list):
            # First svg row whose threshold exceeds y; returns None when y is
            # past the last row (absorbed by the except below as a TypeError).
            for data in _data_list:
                if data[0] > y:
                    return data

        # NOTE(review): "\." in a non-raw string is the escape "\\." — confirm
        # the intended replacement text (possibly just ".") against real css.
        html = self.pattern_sub.sub("\.", html)
        dot_list = self.pattern_dot.findall(html)
        for dot in dot_list:
            try:
                key = self.pattern_span_key.search(dot).group(1)
                px_list = self.pattern_px.findall(dot)[0]
                x, y = _x(px_list)
                y, content = search(y, data_list)
                # x // 14: assumes a fixed 14px glyph width — TODO confirm.
                _dict.setdefault(key, content[x // 14])
            except Exception as e:
                logger.error(f'dot={dot}, error={e}')
