from time import sleep, strftime, strptime, time, mktime
import pickle
from datetime import date, timedelta
from os import listdir, remove
from os.path import join, exists
from re import search
from kdriver import Kdriver
from info import Info
from selenium.webdriver.common.by import By
import xlwt
from bs4 import BeautifulSoup
from selenium import webdriver


class Good:
    """A single scraped product record: a price and a display name."""

    def __init__(self):
        # Defaults represent a not-yet-parsed product.
        self.price, self.name = 0, ""


class Action(Kdriver):
    """Automates JD.com: scrapes search results into an Excel workbook and
    can auto-purchase the shopping cart at a scheduled time.

    Relies on the driver/cookie/wait helpers inherited from Kdriver and on
    the runtime configuration held by the Info class.
    """

    def __init__(self):
        super(Action, self).__init__()
        self.remove_temp_img()
        self.login_url = "https://passport.jd.com/new/login.aspx"
        self.home_url = "https://www.jd.com/"
        self.shopcar_url = "https://cart.jd.com/cart_index/"
        self.workbook = xlwt.Workbook(encoding="utf-8")

    def search(self, key, filename):
        """Log in if needed, then crawl every result page for `key` and save
        the workbook as `<filename>.xls`."""
        if not self.login_if_cookie_effective():
            # FIX: a conditional expression was used purely for its side
            # effects; a plain if/else states the intent.
            if int(Info.is_manual):
                self.guide_manual_login()
            else:
                self.auto_login()

        start_time = time()
        print("爬虫开始时间%s" % start_time)
        ret = self.main_index(key, filename)
        end_time = time()
        if ret:
            print("爬虫成功，结束时间%s" % end_time)
        else:
            print("爬虫失败，共耗时%s" % (end_time - start_time))

    def writeExcel(self, good_list, flag):
        """Write one result page into a new sheet named 'data<flag>'.

        Each element of `good_list` is a [price, name, image/url] row.
        """
        worksheet = self.workbook.add_sheet("data" + str(flag))
        worksheet.write(0, 0, "序号")
        worksheet.write(0, 1, "价格")
        worksheet.write(0, 2, "名称")
        worksheet.write(0, 3, "图片")
        for row, g in enumerate(good_list, start=1):
            worksheet.write(row, 0, row)  # sequence number
            worksheet.write(row, 1, g[0])
            worksheet.write(row, 2, g[1])
            worksheet.write(row, 3, g[2])

    def next_page(self, page):
        """Advance to `page` (clicking 'next' for pages > 1) and scrape it."""
        if page > 1:
            # Simulate clicking the "next page" button in the pager.
            self.driver.find_elements(
                By.XPATH, "//*[@id ='J_bottomPage']/span[1]/a[9]"
            )[0].click()
        good_list = self.Parse_Html_Page()
        self.writeExcel(good_list, page)  # persist this page to the workbook

    def parse_good_detail(self, url):
        """Open a product detail page in a fresh driver and parse its price.

        Returns a Good with .price set, or None on any failure. The extra
        driver is always closed, even on error.
        """
        currentDriver = Kdriver()
        try:
            currentDriver.load_cookie(Info.cookie_path)
            currentDriver.driver.get(url)
            # Scroll to the bottom so lazy-loaded parts of the page render.
            js = "var q=document.documentElement.scrollTop=10000"
            currentDriver.driver.execute_script(js)
            sleep(5)
            # BUG FIX: read the page source from the driver that actually
            # loaded `url`; the original read self.driver, which still held
            # the search-result page.
            html = currentDriver.driver.page_source
            soup = BeautifulSoup(html, "html.parser")
            ret = Good()
            price_tag = soup.find("div", class_="itemInfo-wrap")
            price = price_tag.find("span", class_="price").text.strip()
            print(price)
            ret.price = price  # BUG FIX: the parsed price was discarded
            return ret
        except Exception as e:
            print(e)
            return None
        finally:
            currentDriver.close()

    def Parse_Html_Page(self):
        """Parse the current search-result page into [price, name, url] rows.

        JD lazy-loads the second half of each 60-item page, so scroll to the
        bottom and wait before reading the page source.
        """
        sleep(3)
        js = "var q=document.documentElement.scrollTop=10000"
        self.driver.execute_script(js)
        sleep(5)  # wait for the lazy-loaded items to render
        html = self.driver.page_source
        soup = BeautifulSoup(html, "html.parser")
        # Every <li> under #J_goodsList is one product card.
        goodslist = soup.select("#J_goodsList>ul>li")
        resultlist = []
        for good in goodslist:
            good_price = good.find("i").text
            good_name = good.find_all("em")[1].text
            aItem = good.find("a")
            if aItem is None:
                continue
            good_url = "https:" + aItem["href"]
            good_detail = self.parse_good_detail(good_url)
            if good_detail is None:
                continue
            # BUG FIX: build the [price, name, url] row that writeExcel's
            # g[0]/g[1]/g[2] indexing expects; the original appended a bare
            # Good object and also appended every row twice.
            resultlist.append([good_price, good_name, good_url])
        return resultlist

    def main_index(self, key, filename):
        """Search `key` from the home page, scrape every result page, and
        save the workbook as `<filename>.xls`. Returns True on success."""
        self.driver.get(self.home_url)
        sleep(5)  # let the home page finish loading
        self.driver.maximize_window()
        try:
            self.driver.find_element(By.ID, "key").send_keys(key)
            self.driver.find_element(
                By.XPATH, "//div[@id='search']/div/div[2]/button"
            ).click()
            sleep(5)
            # Total number of result pages, read from the pager.
            total = self.driver.find_elements(
                By.XPATH, "//div[@id='J_bottomPage']/span[2]/em/b"
            )[0].text
            for i in range(1, int(total) + 1):
                self.next_page(i)
        except Exception as e:
            print(e)
            return False
        self.workbook.save(filename + ".xls")
        return True

    def main(self):
        """Full auto-purchase flow: read config, log in, wait for the
        scheduled time, fill the cart and submit the order."""
        self.get_input_data()
        # Log in (saved cookie first, then manual/automatic as configured).
        if not self.login_if_cookie_effective():
            if int(Info.is_manual):
                self.guide_manual_login()
            else:
                self.auto_login()
        # Block until the scheduled run time arrives.
        self.wait_time_up()
        # Add every configured item to the shopping cart.
        for goods in Info.goods:
            self.add_with_url(goods["goods_url"], goods["goods_num"])
        # Open the cart and submit everything in it.
        self.driver.get(self.shopcar_url)
        self.buy_all_in_shoppingcar()

    def get_input_data(self):
        """Read run configuration from the local HTML UI into Info.

        Each li under .data_dict becomes an Info attribute (class name ->
        text content); each .data_dict_goods block becomes one dict appended
        to Info.goods.
        """
        self.driver.get("file:///" + Info.ui_path)
        if self.wait_element_load(".trigger_run", 24 * 60 * 60):
            data_list = self.driver.find_elements(By.CSS_SELECTOR, ".data_dict > li")
            goods_list = self.driver.find_elements(By.CLASS_NAME, "data_dict_goods")
            for data_block in data_list:
                key = data_block.get_attribute("class")
                value = data_block.get_attribute("textContent")
                setattr(Info, key, value)
            for goods_block in goods_list:
                goods_data = goods_block.find_elements(By.TAG_NAME, "li")
                temp = {}
                for goods_param in goods_data:
                    key = goods_param.get_attribute("class")
                    value = goods_param.get_attribute("textContent")
                    temp[key] = value
                Info.goods.append(temp)

    def guide_manual_login(self):
        """Open the login page and give the user 30s to log in by hand;
        save cookies on success, raise TimeoutError otherwise."""
        self.driver.get(self.login_url)
        sleep(30)
        if self.is_login():
            self.save_cookie()
        else:
            raise TimeoutError

    def add_with_url(self, target_url, goods_num):
        """Open a product page, set the quantity and add it to the cart."""
        self.driver.get(target_url)
        self.driver.find_element(By.ID, "buy-num").clear()
        self.driver.find_element(By.ID, "buy-num").send_keys(goods_num)
        self.wait_element_load("#InitCartUrl", 120)
        sleep(2)
        self.driver.find_element(By.ID, "InitCartUrl").click()

    def buy_all_in_shoppingcar(self):
        """Select every cart item, go to checkout and submit the order."""
        # Tick "select all" if it is not already checked; the checked state
        # is the last |-separated field of the clstag attribute.
        secect_all_checkbox = self.driver.find_element(
            By.CSS_SELECTOR, ".select-all > .jdcheckbox"
        )
        secect_state = secect_all_checkbox.get_attribute("clstag").split("|")[-1]
        if not int(secect_state):
            secect_all_checkbox.click()
        # Proceed to checkout.
        self.driver.find_element(By.CLASS_NAME, "common-submit-btn").click()
        # Wait for the order total to appear, then submit the order.
        self.wait_element_load("#sumPayPriceId")
        self.driver.find_element(By.ID, "order-submit").click()

    def remove_temp_img(self):
        """Delete leftover captcha images from the temp image directory."""
        for file in listdir(Info.temp_img_path):
            # FIX: match real image extensions instead of a substring test
            # that also hit names merely containing 'png'/'jpg'.
            if file.endswith((".png", ".jpg")):
                remove(join(Info.temp_img_path, file))

    def wait_time_up(self):
        """Sleep until the date/time configured in Info.run_date/run_time."""
        now_datetime = strftime("%Y-%m-%d %H:%M:%S").split(" ")
        if Info.run_date == "today":
            run_date = now_datetime[0]
        else:
            # 'tomorrow' (or any other value) means the next calendar day.
            run_date = (date.today() + timedelta(days=1)).strftime("%Y-%m-%d")
        # BUG FIX: when Info.run_time is 'now' the HH:MM:SS regex does not
        # match and .group() raised AttributeError; fall back to the current
        # time when there is no match. Also use a raw string for the regex.
        match = search(r"(\d{2}:\d{2}:\d{2})", Info.run_time)
        run_time = match.group(0) if match else now_datetime[1]
        time_raw = run_date + " " + run_time
        time_array = strptime(time_raw, "%Y-%m-%d %H:%M:%S")
        self.limit_time = int(mktime(time_array))
        while time() < self.limit_time:
            sleep(0.5)

    def delete_all_in_shoppingcar(self):
        """Not implemented yet: empty the shopping cart."""
        pass

    def login_if_cookie_effective(self):
        """Try to log in with saved cookies; return True when it worked."""
        if not exists(Info.cookie_path):
            return False
        # Load the domain first so the cookies can be attached, then reload
        # the home page with the session cookies in place.
        self.driver.get(self.home_url)
        self.load_cookie(Info.cookie_path)
        self.driver.get(self.home_url)
        return self.is_login()

    def is_login(self):
        """True when the home page no longer shows the login prompt."""
        return self.driver.current_url == self.home_url and not self.is_exist(
            ".link-login", text="你好，请登录"
        )

    def auto_login(self):
        """Log in with the credentials from Info and solve the slider
        captcha; save cookies on success, raise on captcha failure."""
        driver = self.driver
        driver.get(self.login_url)
        driver.maximize_window()
        # Switch to account/password login and submit the credentials.
        driver.find_element(By.CSS_SELECTOR, ".login-tab-r").click()
        driver.find_element(By.CSS_SELECTOR, "#loginname").send_keys(Info.jd_conut)
        driver.find_element(By.CSS_SELECTOR, "#nloginpwd").send_keys(
            Info.jd_password + "\n"
        )
        # Wait for the captcha image, then try to solve the slider.
        self.wait_element_load(".JDJRV-bigimg > img")
        if not self.move_slider():
            raise Exception
        self.save_cookie()

    def save_cookie(self):
        """Persist the current session cookies with pickle."""
        cookies = self.driver.get_cookies()
        with open(Info.cookie_path, "wb+") as fh:
            pickle.dump(cookies, fh)

    def move_slider(self):
        """Solve the slider captcha, retrying up to 20 times.

        Returns True once logged in, False after exhausting the retries.
        """
        from kvalidate import Kvalidate

        k = Kvalidate(self.driver)
        index = 0
        while not self.is_login():
            index += 1
            if index > 20:
                return False
            sleep(1)
            # Background and gap images are embedded as base64 data URIs.
            bg_b64data = self.driver.find_element(
                By.CSS_SELECTOR, ".JDJRV-bigimg > img"
            ).get_attribute("src")
            gap_b64data = self.driver.find_element(
                By.CSS_SELECTOR, ".JDJRV-smallimg > img"
            ).get_attribute("src")
            bg = k.save_base64_to_png(bg_b64data)
            gap = k.save_base64_to_png(gap_b64data)
            move_length = k.get_move_length(bg, gap)
            tracks = k.get_tracks(move_length)
            slider = self.driver.find_element(By.CLASS_NAME, "JDJRV-slide-btn")
            k.slider_action(tracks, slider)
            sleep(1)
        return True


if __name__ == "__main__":
    try:
        action = Action()
        action.search("珀莱雅", "bolaiya")
    # BUG FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # catch Exception only.
    except Exception:
        import traceback

        # BUG FIX: print_exc() already writes the traceback; wrapping it in
        # print() just emitted a spurious `None`.
        traceback.print_exc()