# -*- coding: utf-8 -*-
import scrapy
import urllib.parse
import json
import datetime
import re
from w3lib.html import remove_tags
import requests
import time
import copy
import random
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from scrapy import signals

import settings
from utils import errors, common
from .myspider import MySpider
from items import TbItemRateItem

if not settings.DEBUG:
    from pyvirtualdisplay import Display


class TbItemRateSpider(MySpider):
    """Crawl item rating comments from Taobao/Tmall search results.

    Flow: a task URL from the web host yields a search keyword; the spider
    walks the Taobao search result pages for that keyword, and for each
    item loads the rate-list endpoint in a Selenium-driven Chrome browser
    (those endpoints reject plain HTTP clients), yielding one
    TbItemRateItem per real (non-placeholder) comment.
    """

    name = 'tb_itemrate'
    # allowed_domains = ['taobao.com']
    redis_key = 'tb_itemrate:start_urls'

    searchUrl = "https://s.taobao.com/search?"
    # Template query params for the search endpoint; always copied before
    # per-request keys ("q", "s") are added.
    searchParams = {
        # "tab": "mall",
        "sort": "renqi-desc",  # sort results by popularity
        "bcoffset": 4,
        "ntoffset": 4,
        "4ppushleft": "1%2C48"
    }

    login_url = "https://login.taobao.com/"
    # SECURITY: plaintext credentials checked into source control.
    # TODO: move to settings / environment configuration and rotate this account.
    login_params = {
        "username": "flyhighfairy@gmail.com",
        "password": "Fairy52hyy"
    }

    rateUrl_tb = "https://rate.taobao.com/feedRateList.htm?"
    rateParams_tb = {
        "currentPageNum": 1,
        "pageSize": 20
    }
    rateUrl_tmall = "https://rate.tmall.com/list_detail_rate.htm?"
    rateParams_tmall = {
        "order": 3,
        "currentPage": 1,
        "callback": "jsonp453"
    }

    # Placeholder texts Taobao substitutes when the buyer left no real
    # review; comments equal to any of these are skipped.
    no_comments = ["此用户没有填写评价。", "15天内买家未作出评价", "评价方未及时做出评价,系统默认好评!"]

    def __init__(self, **kwargs):
        """Start a Chrome instance (backed by a virtual X display outside DEBUG)."""
        super(TbItemRateSpider, self).__init__(**kwargs)
        if not settings.DEBUG:
            # Headless servers: render Chrome into a virtual display.
            self.display = Display(visible=0, size=(800, 600))
            self.display.start()
        self.browser = webdriver.Chrome()
        # self.login()

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Hook spider_closed so browser/display resources are released."""
        spider = super(TbItemRateSpider, cls).from_crawler(crawler, *args, **kwargs)
        crawler.signals.connect(spider.spider_closed, signals.spider_closed)
        return spider

    def spider_closed(self, spider):
        """Quit the browser (and the virtual display) when the spider stops."""
        self.log.logger.debug('spider {0} closed'.format(self.name))
        self.browser.quit()
        if not settings.DEBUG:
            self.display.stop()

    def login(self):
        """Log in to Taobao through the Selenium browser.

        A manual slider-captcha step may be required during the sleep.
        Raises on timeout or when the login form elements are missing.
        """
        self.browser.get(self.login_url)

        try:
            # Wait for the login form to finish rendering before typing.
            WebDriverWait(self.browser, 10).until(
                EC.presence_of_element_located((By.ID, "btn-submit")))
        except Exception as e:
            self.log.logger.warning(str(e))
            raise

        try:
            # find_element_by_xpath() was removed in Selenium 4; use the
            # By-locator API (the original XPaths were plain id lookups).
            self.browser.find_element(By.ID, "username").send_keys(self.login_params["username"])
            self.browser.find_element(By.ID, "password").send_keys(self.login_params["password"])
            time.sleep(2)  # manual slider verification may be needed here
            self.browser.find_element(By.ID, "btn-submit").click()
            time.sleep(2)
        except Exception as e:
            self.log.logger.warning(str(e))
            raise

    def get_next_url(self, is_tmall, item_id, seller_id):
        """Build the rate-list URL for one item.

        :param is_tmall: truthy for Tmall items (different endpoint/params).
        :param item_id: item id taken from the search result entry.
        :param seller_id: seller id (only used for the Tmall endpoint).
        :return: fully URL-encoded rate-list URL string.
        """
        # Build params on copies so the shared class-level template dicts
        # are never mutated between calls.
        if is_tmall:
            params = dict(self.rateParams_tmall, itemId=item_id, sellerId=seller_id)
            return self.rateUrl_tmall + urllib.parse.urlencode(params)
        params = dict(self.rateParams_tb, auctionNumId=item_id)
        return self.rateUrl_tb + urllib.parse.urlencode(params)

    def parse(self, response):
        """Dispatch on the response's domain.

        Task pages (WEB_HOST) yield the first search-result request; search
        pages yield one TbItemRateItem per comment of every listed item,
        then a request for the next result page until the task is done.
        """
        self.log.logger.debug('Parse URL: {0}'.format(response.url))
        domain = self.get_domain(response.url)
        if domain == settings.WEB_HOST:
            # Task page: resolve the keyword and kick off the first result page.
            task_id = self.get_taskid(response.text)
            if task_id:
                keyword = self.get_keyword(task_id)
                params = dict(self.searchParams, q=keyword, s=0)
                next_url = self.searchUrl + urllib.parse.urlencode(params)
                yield scrapy.Request(url=next_url, dont_filter=True,
                                     callback=self.parse, meta={'task_id': task_id})
            return

        task_id = response.meta.get("task_id", 0)
        # Search results are embedded as a JS assignment, not in the DOM.
        match = re.search(r'g_page_config = (?P<data>.*);', response.text)
        if match:
            page_config = json.loads(match.group('data'))
            try:
                result_list = page_config["mods"]["itemlist"]["data"]["auctions"]
            except (KeyError, TypeError):
                # No more results for this keyword: finish the task and stop.
                self.set_task_done(task_id)
                return

            for entry in result_list:
                item_id = entry["nid"]
                seller_id = entry["user_id"]
                is_tmall = entry["shopcard"]["isTmall"]
                # Search results use scheme-relative image URLs ("//img...");
                # check the scheme prefix, not a substring anywhere in the URL.
                pic_url = entry["pic_url"]
                if not pic_url.startswith("https:"):
                    pic_url = "https:" + pic_url
                next_url = self.get_next_url(is_tmall, item_id, seller_id)
                self.log.logger.debug('Parse feedRateList: {0}'.format(next_url))
                # Fetch through the real browser and pace requests randomly
                # to reduce the chance of hitting the anti-crawl wall.
                self.browser.get(next_url)
                time.sleep(random.uniform(3, 5))
                json_data = json.loads(common.extract_json(self.browser.page_source.strip()))
                try:
                    if is_tmall:
                        comment_list = json_data["rateDetail"]["rateList"]
                    else:
                        comment_list = json_data["comments"]
                except (KeyError, TypeError):
                    # Unexpected payload, most likely a login/anti-bot wall;
                    # abort loudly instead of silently skipping items.
                    self.log.logger.warning(
                        'Unexpected rate payload for item {0}'.format(item_id))
                    raise

                for comment in comment_list:
                    if is_tmall:
                        rate_id = comment["id"]
                        content = comment["rateContent"]
                    else:
                        rate_id = comment["rateId"]
                        content = comment["content"]
                    if content in self.no_comments:
                        continue  # skip auto-generated placeholder reviews
                    item = TbItemRateItem()
                    item["crawler_task_id"] = task_id
                    item["item_id"] = item_id
                    item["rate_id"] = rate_id
                    item["title"] = remove_tags(entry["title"])
                    item["pic_url"] = pic_url
                    item["price"] = entry["view_price"]
                    item["content"] = content
                    item["crawled_time"] = datetime.datetime.now()
                    yield item

        if not self.is_task_done(task_id):
            # Advance the "s" offset by one page (44 items) and re-enter parse
            # with the task_id carried in meta.
            s = common.extract_num(response.url, prefix="s=")
            next_url = response.url.replace("s={0}".format(s), "s={0}".format(s + 44))
            yield scrapy.Request(url=next_url, dont_filter=True,
                                 callback=self.parse, meta={'task_id': task_id})
        else:
            self.set_task_done(task_id)