# -*- coding:UTF-8 -*-
import json
import time
from datetime import date, timedelta

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver import ActionChains
from selenium.webdriver.chrome.options import Options

from goods.models import Category, Goods

TB_LOGIN_URL = 'https://login.taobao.com/member/login.jhtml'
CHROME_DRIVER = 'C:\\Users\\liujie\\PycharmProjects\\TaoBaoKeDemo\\chromedriver.exe'
TB_Goods_url = 'https://s.taobao.com/list?spm=a21bo.2017.201867-links-0.3.781511d9Kjo7Es&q=%E8%BF%9E%E8%A1%A3%E8%A3%99&cat=16&style=grid&seller_type=taobao'


class SessionException(Exception):
    """Raised when the crawler's browser session fails, e.g. a login
    error or a slider-captcha verification failure."""

    def __init__(self, message):
        # Pass the message (NOT ``self``) to Exception so that
        # ``e.args`` holds the real text instead of a self-reference
        # and the default repr/pickling behave correctly.
        super().__init__(message)
        self.message = message

    def __str__(self):
        return self.message


class Crawler:
    """Selenium-driven crawler: logs into Taobao, walks the category
    bar on a goods listing page and persists each item into the Django
    ``Category`` / ``Goods`` models.

    Note: written against the Selenium 3 ``find_element_by_*`` API;
    keep that API unless the pinned chromedriver/selenium versions
    are upgraded together.
    """

    def __init__(self):
        self.browser = None          # selenium WebDriver, created by __init_browser()
        self.yesterday_date = None   # "YYYY-MM-DD" string, set by __init_date()
        self.today_date = None       # "YYYY-MM-DD" string, set by __init_date()

    def start(self, username, password):
        """Run the full crawl: log in with the given credentials, then
        scrape every category reachable from the target listing page.

        :param username: Taobao account name
        :param password: Taobao account password
        :return: result of __json_category_taobao_data() (currently False)
        :raises SessionException: on login failure or slider-captcha failure
        """
        print("初始化日期")
        self.__init_date()
        print("初始化浏览器")
        self.__init_browser()
        print("切换至密码输入框")
        # self.__switch_to_password_mode()
        print(username)
        print(password)
        time.sleep(0.5)
        print("输入用户名")
        self.__write_username(username)
        time.sleep(2.5)
        print("输入密码")
        self.__write_password(password)
        time.sleep(3.5)
        print("程序模拟解锁")
        if self.__lock_exist():
            self.__unlock()
        print("开始发起登录请求")
        self.__submit()
        time.sleep(4.5)
        # Logged in -- jump straight to the goods listing page.
        print("登录成功，跳转至目标页面")
        self.__navigate_to_target_page(TB_Goods_url)
        time.sleep(4.5)
        # Crawl the listing, category by category.
        return self.__json_category_taobao_data()

    def __switch_to_password_mode(self):
        """Switch the login form from QR-code mode to username/password
        mode (currently unused -- see the commented call in start())."""
        if self.browser.find_element_by_id('J_QRCodeLogin').is_displayed():
            self.browser.find_element_by_id('J_Quick2Static').click()

    def __write_username(self, username):
        """Type *username* into the login-id input, clearing it first."""
        username_input_element = self.browser.find_element_by_id('fm-login-id')
        username_input_element.clear()
        username_input_element.send_keys(username)

    def __write_password(self, password):
        """Type *password* into the login-password input, clearing it first."""
        password_input_element = self.browser.find_element_by_id("fm-login-password")
        password_input_element.clear()
        password_input_element.send_keys(password)

    def __lock_exist(self):
        """Return True when the slider-captcha widget is present AND visible."""
        return self.__is_element_exist('#nc_1_wrapper') and self.browser.find_element_by_id(
            'nc_1_wrapper').is_displayed()

    def __unlock(self):
        """Drag the slider-captcha bar to try to unlock.

        :raises SessionException: when Taobao rejects the simulated drag
        """
        bar_element = self.browser.find_element_by_id('nc_1_n1z')
        # 800px is wide enough to reach the right edge at the maximized
        # window size set in __init_browser().
        ActionChains(self.browser).drag_and_drop_by_offset(bar_element, 800, 0).perform()
        time.sleep(1.5)
        # Screenshot kept for post-mortem debugging of failed drags.
        self.browser.get_screenshot_as_file('error.png')
        if self.__is_element_exist('.errloading > span'):
            error_message_element = self.browser.find_element_by_css_selector('.errloading > span')
            error_message = error_message_element.text
            # Reset the captcha widget so a retry is possible.
            self.browser.execute_script('noCaptcha.reset(1)')
            raise SessionException('滑动验证失败, message = ' + error_message)

    def __submit(self):
        """Click the login button and surface any on-page error.

        :raises SessionException: when the page shows a login error box
        """
        self.browser.find_element_by_xpath('//*[@id="login-form"]/div[4]/button').click()
        time.sleep(0.5)
        if self.__is_element_exist("#J_Message"):
            error_message_element = self.browser.find_element_by_css_selector('#J_Message > p')
            error_message = error_message_element.text
            raise SessionException('登录出错, message = ' + error_message)

    def __navigate_to_target_page(self, url):
        """Load *url* in the current browser session."""
        self.browser.get(url)

    def __json_taobao_data(self, category):
        """Scrape every goods card on the current listing page, visit each
        item's detail page for review/sales counters, and save a ``Goods``
        row per item.

        :param category: category value stored on each saved ``Goods`` row
        :return: the last scraped goods dict, or None when the page had
                 no goods cards (previously raised UnboundLocalError)
        """
        # Scroll to the bottom step by step so lazy-loaded cards render.
        for i in range(1, 100):
            self.browser.execute_script("document.documentElement.scrollTop=" + str(i * 100))
        html = self.browser.execute_script("return document.documentElement.outerHTML")
        soup = BeautifulSoup(html, "html.parser", from_encoding="utf-8")
        goods_list = []
        good = None  # guard: page may contain no goods cards at all
        for tag in soup.find_all(attrs={"data-category": "auctions"}):
            title = tag.find("div", class_="row row-2 title").find("a")
            price = tag.find(class_="price g_price g_price-highlight").text
            xiadanpirce = tag.find(class_="deal-cnt").text
            imageurl = tag.find("div", class_="pic").find("img")["src"]
            wangwang = tag.find("div", class_="shop").text
            address = tag.find("div", class_="location").text
            good = {"title": str(title.text).replace("\n", ""), "goods_url": title["href"], "goods_price": price,
                    "sales_volume": xiadanpirce,
                    "image_url": imageurl, "address": address, "wangwang": str(wangwang).replace("\n", "")}
            # Open the item's detail page in a new tab and switch to it.
            self.browser.find_element_by_xpath('//*[@id="' + title['id'] + '"]').click()
            self.browser.switch_to.window(self.browser.window_handles[1])
            # Long sleeps: detail pages load counters asynchronously and
            # too-fast navigation trips Taobao's anti-bot checks.
            time.sleep(15)
            self.browser.find_element_by_xpath('//*[@id="J_TabBar"]/li[2]/a').click()
            time.sleep(10)
            htmlInfo = self.browser.execute_script("return document.documentElement.outerHTML")
            soupInfo = BeautifulSoup(htmlInfo, "html.parser", from_encoding="utf-8")
            good['evaluate_all'] = soupInfo.find(id='J_RateCounter').string
            good['sales_counter'] = soupInfo.find(id='J_SellCounter').string
            good['evaluate_good'] = soupInfo.find(attrs={"data-kg-rate-stats": "good"}).string
            goods_list.append(good)
            # Persist immediately so a mid-crawl crash keeps earlier rows.
            gd = Goods(title=good["title"].strip(), image_url=good["image_url"], goods_url=good["goods_url"],
                       goods_price=good["goods_price"].strip(),
                       sales_counter=good["sales_counter"].strip(),
                       sales_volume=good["sales_volume"].strip().replace("人付款", ""),
                       evaluate_all=good["evaluate_all"].strip(),
                       evaluate_good=good["evaluate_good"].strip().replace("(", "").replace(")", ""),
                       category=category)
            gd.save()
            print(good)
            # Close the detail tab and return to the listing tab.
            self.browser.close()
            self.browser.switch_to.window(self.browser.window_handles[0])
        self.browser.back()
        print(goods_list)
        return good

    def __json_category_taobao_data(self):
        """Iterate the category bar, crawl each category's listing and
        record a ``Category`` row per entry.

        :return: result of __save_list_to_db() (currently False)
        """
        # Expand the category row before reading it.
        self.browser.find_element_by_xpath('//*[@id="J_NavCommonRow_1"]/div[3]/span[2]').click()
        html = self.browser.execute_script("return document.documentElement.outerHTML")
        soup = BeautifulSoup(html, "html.parser", from_encoding="utf-8")
        crawler_list = []
        category_list = []
        i = 0
        for item in soup.find('div', id='J_NavCommonRowItems_1').find_all("a"):
            print(item["title"])
            category = Category(category=item["title"])
            category.save()
            # XPath indices are 1-based, hence the pre-increment.
            i = i + 1
            self.browser.find_element_by_xpath('//*[@id="J_NavCommonRowItems_1"]/a[' + str(i) + ']').click()
            time.sleep(5)
            category_list.append({'name': item["title"]})
            crawler_list.append({"category": item["title"], "list": self.__json_taobao_data(item["title"])})
            time.sleep(6)
            # Re-expand the category row for the next iteration.
            self.browser.find_element_by_xpath('//*[@id="J_NavCommonRow_1"]/div[3]/span[2]').click()
        # BUG FIX: ``self`` was passed explicitly on top of the bound
        # method's implicit self, which raised TypeError at runtime.
        return self.__save_list_to_db(category_list, crawler_list)

    def __save_list_to_db(self, category_list, crawler_list):
        """Finish the crawl and shut the browser down.

        NOTE(review): rows are already persisted incrementally in
        __json_taobao_data() / __json_category_taobao_data(), so this
        method only closes the session; the parameters are currently
        unused but kept for interface stability.
        """
        print('爬取完成')
        self.browser.close()
        return False

    def __init_date(self):
        """Set today_date / yesterday_date as "YYYY-MM-DD" strings.

        ``date_offset`` shifts both dates back by N days (0 = real today).
        """
        date_offset = 0
        self.today_date = (date.today() + timedelta(days=-date_offset)).strftime("%Y-%m-%d")
        self.yesterday_date = (date.today() + timedelta(days=-date_offset - 1)).strftime("%Y-%m-%d")

    def __init_browser(self):
        """Create the Chrome WebDriver and open the Taobao login page.

        Requires a proxy listening on 127.0.0.1:9000 and the pinned
        chromedriver binary at CHROME_DRIVER.
        """
        options = Options()
        # options.add_argument("--headless")
        # 1 = load images (needed for the listing's lazy-load scraping).
        prefs = {"profile.managed_default_content_settings.images": 1}
        options.add_experimental_option("prefs", prefs)
        options.add_argument('--proxy-server=http://127.0.0.1:9000')
        options.add_argument('disable-infobars')
        options.add_argument('--no-sandbox')
        self.browser = webdriver.Chrome(executable_path=CHROME_DRIVER, options=options)
        self.browser.implicitly_wait(3)
        self.browser.maximize_window()
        self.browser.get(TB_LOGIN_URL)

    def __is_element_exist(self, selector):
        """Return True when *selector* (CSS) matches an element on the page."""
        try:
            self.browser.find_element_by_css_selector(selector)
            return True
        except NoSuchElementException:
            return False


#if __name__ == '__main__':
    # 执行命令行
    # file = open('a', 'rb')
    # html = file.read()
    # soup = BeautifulSoup(html, "html.parser", from_encoding="utf-8")
    # title = soup.find("div", class_="row row-2 title").find("a").text
    # print(soup.find(class_="price g_price g_price-highlight").text)
    # print(title)
    # print(soup.find_all("div",class_='wangwang')[0].find("span").find("a")['href'])
    #Crawler().start('liujie770161055', '', TB_Goods_url)
    # soup = BeautifulSoup(open('test.html', 'r', encoding='UTF-8').read(), "html.parser", from_encoding="utf-8")
    # div = soup.find('div', id='J_NavCommonRowItems_1')
    # for item in div.find_all("a"):
    #     print(item["title"])
