import time
import re
import requests
from lxml import etree
from common.pre_config_brower import chrome_brower, request_for_sales
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait


class Johnlewis:
    """Scrape product listings (image URL, name, price, product URL) from a
    John Lewis category page, following pagination via the "next" button.

    NOTE(review): depends on the project-local ``chrome_brower`` factory and on
    the site's current DOM (``data-test`` attributes) — verify the selectors
    still match before trusting the output.
    """

    def __init__(self, url):
        # Images are disabled for speed; JS must stay enabled because the
        # product grid is rendered client-side.
        self.driver = chrome_brower(disableIMG=True, disableJS=False)
        self.home_url = url
        self.main_windows_id = None
        # Flipped to True by get_all_items() once no "next" button is found.
        self.last_page = False
        self.open_web()

    # open the web pages
    def open_web(self):
        """Load ``self.home_url`` in the driver and remember the newest window
        handle. Best-effort: failures are logged, not raised."""
        try:
            print(self.home_url)
            self.driver.get(self.home_url)
            self.main_windows_id = self.driver.window_handles[-1]
        except Exception as e:
            print("open the first page failure", e)

    # search the elements, e.g. production name , href, picture and so on
    def get_info_from_page(self):
        """Yield ``(jpg_url, produce_name, price_value, produce_url)`` for each
        product tile on the currently loaded results page.

        Scrolls first so lazily-loaded tiles render; keeps nudging the scroll
        until a full page of 192 tiles is present (the last page may hold
        fewer, so ``self.last_page`` short-circuits the wait).
        """
        self.roll_to_top()
        self.driver.implicitly_wait(30)

        while True:
            # By.XPATH replaces the find_elements_by_xpath API that was
            # removed in Selenium 4 (By is already imported at file top).
            tiles = self.driver.find_elements(
                By.XPATH, '//div[@data-test="product-image-container"]')
            print(len(tiles))
            # A full results page holds 192 tiles; scroll again until they
            # have all lazy-loaded, unless we are on the (shorter) last page.
            if not self.last_page and len(tiles) < 192:
                self.roll_to_top(500)
            else:
                break

        for tile in tiles:
            link = tile.find_element(By.XPATH, './div/a')
            produce_url = link.get_attribute('href')
            img = link.find_element(By.XPATH, './img')  # fetch once, read twice
            jpg_url = img.get_attribute('src')
            produce_name = img.get_attribute('alt')
            # "... Range" tiles are category links, not products — skip them.
            if "Range" in produce_name:
                continue
            price_value = tile.find_element(By.XPATH, './a/div[2]').text

            # Protocol-relative image URLs need an explicit scheme.
            if "http" not in jpg_url:
                jpg_url = "".join(["https:", jpg_url])
            # Ask the CDN for the smaller 160px rendition instead of 320px.
            jpg_url = jpg_url.replace("320$", "160$")

            yield (jpg_url, produce_name, price_value, produce_url)

    def roll_to_top(self, interval=1000):
        """Scroll the window downwards in ``interval``-pixel steps, up to
        20000px, pausing 1s per step so lazy-loaded content can render.

        NOTE(review): despite the name, this scrolls *down* the page.
        """
        offset = 0
        while offset < 20000:
            self.driver.execute_script(
                "var q=document.documentElement.scrollTop=%s" % offset)
            time.sleep(1)
            offset += interval

    def get_all_items(self):
        """Yield product tuples from every results page, following pagination
        until no "next" button remains, then shut the browser down."""
        while True:
            next_btn = self.driver.find_elements(
                By.CSS_SELECTOR, '[data-test="next-btn"]')
            if next_btn:
                # open_web() below reads home_url, so pagination works by
                # rewriting it to the next page's href.
                self.home_url = next_btn[0].get_attribute('href')
            else:
                self.last_page = True
                print("last page: %s" % self.last_page)

            item_num_str = self.driver.find_elements(
                By.CSS_SELECTOR, '[data-test="heading-num-results"]')[0].text
            item_num = re.findall(r"(\d+)", item_num_str)[0]
            print("total_num %s" % item_num)

            for infor in self.get_info_from_page():
                yield infor

            if self.last_page:
                # quit() (not close()) also terminates the chromedriver
                # process, so no browser is leaked after the crawl finishes.
                self.driver.quit()
                break
            else:
                self.open_web()

    def open_url_in_new_window(self, url):
        """Open ``url`` in a new browser window/tab and return its handle."""
        js = "window.open('%s')" % url
        self.driver.execute_script(js)
        return self.driver.window_handles[-1]

    @staticmethod
    def more_produce_info(product_url):
        """Fetch a product detail page over HTTP and return a dict with keys
        ``produce_code``, ``description`` and ``specifications_list``.

        Retries the request indefinitely on errors with a 2s back-off; a 30s
        timeout stops a hung connection from stalling the crawl forever.
        """
        produce_info = dict()
        while True:
            try:
                resp = requests.get(url=product_url, timeout=30)
                break
            except Exception as e:
                print(e)
                print("try again for url: %s" % product_url)
                time.sleep(2)

        html = etree.HTML(resp.text)

        # BUG FIX: the original also did an unconditional [0] lookup before
        # this guard, raising IndexError whenever the element was missing.
        produce_code = html.xpath("//div[@id='store-stock-app']")
        if produce_code:
            produce_info["produce_code"] = produce_code[0].get("data-product-code")
        else:
            produce_info["produce_code"] = "Not found"

        # Join description fragments with "\015" (== "\r"), kept as-is for
        # compatibility with whatever consumes these records downstream.
        product_description_list = html.xpath(
            "//div[@class='product-detail__description-inner']//text()")
        description = ""
        for txt in product_description_list:
            tmp_txt = txt.replace("\n", "").strip()
            if tmp_txt:
                description = "\015".join([description, tmp_txt])
        produce_info["description"] = description

        product_specifications_list = dict()
        labels = html.xpath("//dt[@class='product-specification-list__label']")
        values = html.xpath("//dd[@class='product-specification-list__value']")
        for index, label_el in enumerate(labels):
            label_texts = label_el.xpath("text()")
            if not label_texts:  # dt with no direct text node
                continue
            label = label_texts[0].replace("\n", "").strip()
            # Guard against label/value lists of different lengths and dd
            # elements with no direct text node.
            if label and index < len(values):
                value_texts = values[index].xpath("text()")
                if value_texts:
                    product_specifications_list[label] = (
                        value_texts[0].replace("\n", "").replace("  ", "").strip())

        produce_info["specifications_list"] = str(
            product_specifications_list).replace("',", "',\015")

        return produce_info

