import datetime
import json
import os
import re
import time
import traceback
import uuid
from io import BytesIO

import imagehash
from PIL import Image
import requests
from bs4 import BeautifulSoup
import html2text
from playwright.sync_api import sync_playwright

from eshoputils.http_client import HttpClient
from eshoputils.mysql_util import MySqlUtil
from graph.states import EshopAssistantState

def handle_route(route, request):
    """Playwright route handler that blocks image downloads.

    Image requests are aborted to save bandwidth; every other
    resource type is allowed to proceed unchanged.
    """
    if request.resource_type != "image":
        route.continue_()
        return
    route.abort()

class BroswerExpressShopSpider:
    """Playwright-based spider for AliExpress shop / product pages.

    For each entry in ``urls`` it connects to a remote (browserless.io)
    Chromium instance, navigates the shop list or product page, handles
    captcha challenges via CapSolver, and downloads listing text, price,
    images and description into per-product directories under ``download/``.

    Each ``url`` entry is indexable: url[0] = page URL, url[1] = type tag
    ("shop" / "product" / "shop_keyword" / anything else = keyword),
    url[2] = keyword text (presumably, used for "shop_keyword" — confirm
    with the caller).

    NOTE(review): class name contains a typo ("Broswer") — kept as-is for
    backward compatibility with existing callers.
    """
    def __init__(self, state:EshopAssistantState, config, urls, work_dir):
        # state: shared graph state; "task_info" list is appended to for
        # progress reporting (see update_state_info).
        # config: graph config dict; also carries run_info attributes
        # (config["run_info"].num / .history_distinct) — mixed access styles.
        self.urls = urls
        self.state = state
        self.config = config
        self.work_dir = work_dir
    def capsolver(self, siteurl, sitekey):
        """Solve a reCAPTCHA v2 through the CapSolver API.

        Creates a ReCaptchaV2TaskProxyLess task for (siteurl, sitekey),
        then polls getTaskResult every 5 seconds, up to 5 attempts.

        Returns the gRecaptchaResponse token string, or None on
        failure / timeout (returns bare None implicitly on task-creation
        failure as well).
        """
        try:
            # NOTE(review): API key hard-coded in source — should live in
            # configuration / environment, not in the repository.
            api_key="CAP-F4615BF085D8FE1CEFD3D279402C268A"
            payload = {
                "clientKey": api_key,
                "task": {
                    "type": 'ReCaptchaV2TaskProxyLess',
                    "websiteKey": sitekey,
                    "websiteURL": siteurl
                }
            }
            res = HttpClient.post(url="https://api.capsolver.com/createTask", body=payload, timeout=10, json=False)
            resp = res.json()
            task_id = resp.get("taskId")
            if not task_id:
                self.update_state_info(f"创建task失败:{res.text}")
                return
            self.update_state_info("开始根据taskid获取token")
            count = 0
            while True:
                try:
                    count+=1
                    time.sleep(5)  # delay before each result poll
                    payload = {"clientKey": api_key, "taskId": task_id}
                    res = HttpClient.post("https://api.capsolver.com/getTaskResult", body=payload, json=False, timeout=10)
                    resp = res.json()
                    status = resp.get("status")
                    if status == "ready":
                        return resp.get("solution", {}).get('gRecaptchaResponse')
                    if status == "failed" or resp.get("errorId"):
                        self.update_state_info("解决验证失败！！！！")
                        return None
                    self.update_state_info(f"第{count}token生成还没完成.....")
                except Exception as e:
                    self.update_state_info(f"获取token失败重试中......{e}")
                if count >= 5:
                    self.update_state_info("获取token超过5次放弃")
                    return None
        except Exception as e:
            self.update_state_info(f"获取token出错:{e}")
            return None
    def update_state_info(self, info:str):
        """Print a progress message and push it into the shared graph state.

        EshopAssistantGraph is imported lazily here to avoid a circular
        import between this module and graph.graph.
        """
        print(info)
        from graph.graph import EshopAssistantGraph
        self.state["task_info"].append(info)
        EshopAssistantGraph.graph.update_state(config=self.config, values = {"task_info":self.state["task_info"]})

    def check_product_list_slide(self):
        """Detect the list-page slider captcha dialog and try to drag it.

        NOTE(review): ``frame`` below is a Playwright Locator, and Locator
        objects have no ``.mouse`` attribute — the mouse calls here would
        raise AttributeError if this path is ever hit; the intent was
        probably ``self.first_page.mouse``. Confirm before relying on it.
        """
        frame = self.first_page.locator('xpath=//*[@id="baxia-dialog-content"]')
        if frame.count() > 0:
            print("存在列表滑块。滑动")
            slide_btn = frame.locator('xpath=//*[@id="nc_1_n1z"]')
            if slide_btn.count() > 0:
                print("找到滑块准备滑动")
                box = slide_btn.bounding_box()

                # Compute the drag start position and the target position
                start_x = box["x"] + box["width"] / 2 + 2
                start_y = box["y"] + box["height"] / 2 - 1
                end_x = start_x + 300  # drag 300 px to the right (original comment said 100)
                # Perform the drag in small steps to mimic a human
                frame.mouse.move(start_x, start_y)
                frame.mouse.down()
                steps = 50  # number of intermediate drag steps
                for i in range(steps):
                    # Target x coordinate for this step
                    new_x = start_x + (end_x - start_x) * (i + 1) / steps
                    frame.mouse.move(new_x, start_y)
                    time.sleep(0.05)  # pause 50 ms between steps
                frame.mouse.up()


    def enter_shop(self):
            """Entry point: crawl every configured URL (one shop per URL).

            Opens a fresh remote browser connection per URL, dismisses the
            adult-content gate if present, then either scrapes a single
            product page (type "product") or the product list via crawl().

            Returns the list of product directory paths that were written.
            """
            all_product_paths = []
            # Each URL represents one shop
            for ui, url in enumerate(self.urls):
                with sync_playwright() as p:
                    self.p = p
                    # NOTE(review): browserless.io token hard-coded — move to config.
                    self.browser = self.p.chromium.connect('wss://production-sfo.browserless.io/chrome/playwright?token=QcDUw0Uu5aw59kedfea74fe0859ac73025650e8c68&timeout=1800000&blockAds=true&launch={"stealth":true,"headless": false}')
                    self.context = self.browser.new_context(viewport={"width": 3840, "height": 2160})
                    #self.context.route("**/*", handle_route)
                    self.context.set_default_timeout(2000)
                    self.first_page = self.context.new_page()
                    try:
                        self.update_state_info(f"{self.work_dir[0]}:开始爬取url：{url[0]}")
                        start = time.time()
                        self.first_page.goto(url[0], timeout=0, wait_until="load")
                        print(f"{self.work_dir[0]}:网页加载时间{time.time() - start}")
                        # Scroll down and back up to trigger lazy loading
                        self.first_page.mouse.wheel(0, 1000)
                        self.first_page.wait_for_timeout(1000)
                        self.first_page.mouse.wheel(0, -1000)
                        self.first_page.wait_for_timeout(1000)
                        # Dismiss the 18+ age-gate button if it appears
                        btn18 = self.first_page.locator(selector="xpath=//div[contains(@class,'law18--btn--29ue4Ne')]")
                        if btn18.count() > 0:
                            try:
                                print(f"{self.work_dir[0]}:点击18禁按钮")
                                btn18.click()
                            except:
                                self.update_state_info(f"{self.work_dir[0]}:18禁按钮点击出错")
                        if url[1] == "product":
                            # This link points directly at a fixed product detail page
                            self.update_state_info(f"{self.work_dir[0]}:此链接直接爬取固定商品详情")
                            self.get_product_info(url, self.config['configurable']['thread_id'], all_product_paths, url[0], 0, self.first_page, ui)
                        else:
                            self.first_page.reload(timeout=0, wait_until="load")
                            self.first_page.wait_for_load_state(timeout=5000, state="load")
                            # Check whether the product list actually rendered
                            self.update_state_info(f"{self.work_dir[0]}:商品列表是否出现")
                            # self.first_page.screenshot(path=f"./download/{ui}list.jpg")
                            flag = self.first_page.locator(selector='xpath=//span[text()="Orders"]')
                            flag2 = self.first_page.locator(selector="xpath=//span[@class='topRefine2023--text--39fcGoQ' and text()='Best Match']")
                            if flag.count()<=0 and flag2.count()<=0:
                                self.update_state_info(f'{self.work_dir[0]}:列表页没有展示,跳过此链接{url[0]}')
                                self.first_page.screenshot(path=f"./download/{ui}errorlist.jpg")
                                continue
                            self.update_state_info(f'{self.work_dir[0]}:列表加载成功开始爬取商品')
                            # Switch to Gallery view when the toggle is present
                            galler_btn = self.first_page.locator("xpath=//span[text()='Gallery']")
                            if galler_btn.count() > 0:
                                try:
                                    galler_btn.click()
                                    self.first_page.wait_for_timeout(1000)
                                except:
                                    pass
                            # Loaded successfully — start crawling the list
                            all_product_paths += self.crawl(url, ui)
                    except:
                        traceback.print_exc()
                        self.update_state_info(f'{self.work_dir[0]}:url={url[0]}爬取失败')
                    finally:
                        try:
                            self.context.close()
                        except:
                            pass
            # All URLs crawled; contexts were closed per-URL above
            print("所有url爬完关闭浏览器")
            return all_product_paths

    def crawl(self, url, url_index):
        """Crawl the first page of the product list currently shown.

        Collects product anchors (selector depends on the URL type), opens
        each product in a fresh browser context, and delegates extraction
        to get_product_info(). Returns the list of product directory paths.
        """
        task_id = self.config['configurable']['thread_id']
        product_paths = []
        print('进入爬取')
        if 'shop' == url[1] or "shop_keyword" == url[1]:
            latest_products = self.first_page.locator(selector="xpath=//a[contains(@ae_button_type,'XRcmd-ItemClick')]")
        else:
            latest_products = self.first_page.locator("xpath=//a[@class='multi--container--1UZxxHY cards--card--3PJxwBm search-card-item']")
        self.update_state_info(f'{self.work_dir[0]}:第一页一共有{latest_products.count()}个产品')

        # Only take the first N products (N = run_info.num; original comment said 10)
        for index in range(latest_products.count())[0:self.config["run_info"].num]:
            self.update_state_info(f"{self.work_dir[0]}:开始获取第{index + 1}个商品")
            latest_product = latest_products.nth(index)
            product_link = latest_product.get_attribute("href")
            if self.config["run_info"].history_distinct:
                # Historical de-duplication: skip links already in the DB.
                # NOTE(review): product_link is interpolated into SQL — injection-prone;
                # should use a parameterized query.
                res = MySqlUtil.execute_sql(f"select count(1) from spider_product_result where link = '{product_link}'")[0]
                if res[0] > 0:
                    continue
            # Alternative browserless "unblock" + CDP flow, kept for reference:
            # print('链接unblock')
            # print("https://"+product_link)
            # resp = HttpClient.post(
            #     url="https://production-sfo.browserless.io/chromium/unblock?token=QcDUw0Uu5aw59kedfea74fe0859ac73025650e8c68",
            #     body={
            #         "url": "https://"+product_link,
            #         "browserWSEndpoint": True,
            #         "cookies": True,
            #         "content": False,
            #         "screenshot": False,
            #         "ttl": 60000
            #
            # })
            # print('链接cdp')
            # new_browser = self.p.chromium.connect_over_cdp(resp["browserWSEndpoint"]+"?token=QcDUw0Uu5aw59kedfea74fe0859ac73025650e8c68&blockAds")
            # print('unblock over')
            # new_context = new_browser.contexts[0]
            # new_context.set_default_timeout(2000)
            # product_page = new_context.pages[0]

            new_context = self.browser.new_context(viewport={"width": 3840, "height": 2160})
            #new_context.route("**/*", handle_route)
            new_context.set_default_timeout(2000)
            product_page = new_context.new_page()
            start = time.time()
            product_page.goto("https:" + product_link, timeout=0, wait_until="load")
            print(f"{self.work_dir[0]}:网页加载时间{time.time() - start}")
            # Fetch the product details
            print(f"{self.work_dir[0]}:获取商品详情")
            self.get_product_info(url, task_id, product_paths, "https:" + product_link, index, product_page, url_index)
            self.update_state_info(f"{self.work_dir[0]}:第{index+1}个商品爬取完成")
            try:
                new_context.close()
                #product_page.close()
            except:
                pass
            # Close the tab and return to the product list page
            #product_page.close()
        return product_paths

    def get_product_info(self, url, task_id, product_paths, product_link, index, product_page, parent_index):
        """Extract one product page into a per-product directory.

        Handles a possible reCAPTCHA (via capsolver), then scrapes listing
        title, price, main/SKU/description images (de-duplicated by URL and
        perceptual hash), specifications, description HTML/plain-text, and
        an optional video link. Appends the output directory path to
        ``product_paths`` on success; on any error it logs and screenshots
        instead of raising.
        """
        try:
            product_page.wait_for_timeout(2000)
            print(product_link)
            sitekey = None
            # Check whether a human-verification (captcha) frame appeared
            for frame in product_page.frames:
                frame.wait_for_load_state(timeout=5000, state = "load")
                res = re.findall("sitekey:\"[\d\w\-\.:\/]*", frame.content())
                if len(res) > 0:
                    # Strip the leading 'sitekey:"' prefix (9 chars)
                    sitekey = res[0][9:]
                    sitekeyframe = frame
                    break
            if sitekey:
                self.update_state_info(f"出现人机校验sitekey:{sitekey}，framename:{sitekeyframe.name}")
                self.update_state_info(f"开始获取recaptcha的token")
                token = self.capsolver(product_link, sitekey)
                if token:
                    self.update_state_info("获取token成功开始绕过")
                    # Inject the solved token via the page's recaptcha callback
                    sitekeyframe.evaluate('__recaptchaValidateCB__("'+token+'");')
                    self.update_state_info("绕过完成开始等待页面重新加载")
                    product_page.wait_for_timeout(1000)
                    product_page.reload(timeout=0, wait_until="load")
                else:
                    self.update_state_info("获取token失败，取消这个产品的获取")
                    return
            current_date = datetime.datetime.now().strftime("%Y-%m-%d")
            # Expand up to two "View more" sections so their content is in the DOM
            more_btns = product_page.locator('xpath=//span[contains(text(),"View more")]')
            try:
                for ix in range(more_btns.count())[0:2]:
                    more_btn = more_btns.nth(ix)
                    more_btn.click()
                    product_page.wait_for_timeout(500)
            except Exception as e:
                self.update_state_info(f"{self.work_dir[0]}:more button 没找到")
            # product_page.screenshot(path = f"./download/{parent_index}{index}pic.jpg")
            product_page.wait_for_load_state(timeout=5000, state = "load")
            shop_name = None
            if 'shop' == url[1] or "shop_keyword" == url[1]:
                #shop_name = product_page.locator("xpath=//a[contains(@data-href,'aliexpress.com/store')]/span").first.inner_text()
                shop_name = product_page.locator("xpath=//div[contains(@class,'store-info--name')]/a[1]").first.inner_text()

            # Map the URL type tag to a human-readable (Chinese) directory label
            if url[1] == 'shop':
                type = "店铺"
            elif url[1] == 'product':
                type = '商品链接'
            elif url[1] == "shop_keyword":
                type = "店铺-关键字"
            else:
                type = '关键词'

            dir_name = None
            if type == '店铺':
                dir_name = shop_name
            elif type == '关键词':
                dir_name = url[1]
            elif type == "店铺-关键字":
                dir_name = shop_name+"/"+url[2]

            if "aliexpress" in url[0]:
                platform_type = "express（速卖通平台）"
            else:
                platform_type = "shopee（虾皮平台）"

            if dir_name:
                # Create the product directory
                product_dir = f'download/{current_date}/{task_id}/{platform_type}/{type}/{dir_name}/{index+1}_{uuid.uuid4().hex}'
            else:
                product_dir = f'download/{current_date}/{task_id}/{platform_type}/{type}/所有商品/{uuid.uuid4().hex}'

            self.mkdir(product_dir)
            picdir = f'{product_dir}/oripic'
            self.mkdir(picdir)

            # Extract information
            # listing title
            listing = product_page.locator('xpath=//h1[@data-pl="product-title"]').inner_text()
            self.update_state_info(f"{self.work_dir[0]}:第{index+1}个商品listing：{listing}")

            # price
            price = self.get_price(product_page)
            self.update_state_info(f"{self.work_dir[0]}:第{index+1}个商品price：{price}")

            # Track downloaded image URLs and perceptual hashes to skip duplicates
            imgurl_set = set()
            img_hash_set = set()
            # main images
            self.update_state_info(f"{self.work_dir[0]}:获取主图")
            res = re.findall("imagePathList\":\[[\d\w\-\.:\/\",]*\]", product_page.content())
            ix = 0
            if res:
                # Strip the 'imagePathList":' prefix (15 chars) and parse the JSON array
                good_img_urls = json.loads(res[0][15:])
                for ix, good_img_url in enumerate(good_img_urls):
                    try:
                        gr = requests.get(good_img_url, timeout=(5,10))
                        imgurl_set.add(good_img_url)
                        image_content = gr.content
                        with BytesIO(image_content) as img_bytes:
                            img_hash_set.add(imagehash.average_hash(Image.open(img_bytes)))
                        with open(f"{picdir}/{ix+1}.png", "wb") as w:
                            w.write(image_content)
                    except:
                        print("图片获取失败")
                        pass
            else:
                # NOTE(review): Playwright Browser has no `page_source` attribute
                # (that is Selenium) — this print would raise AttributeError.
                print(f'{self.work_dir[0]}:第{index+1}个商品没有图片：网页源码：{self.browser.page_source}')


            self.update_state_info(f"{self.work_dir[0]}:获取sku图")
            # SKU images
            ixs = 0
            sku_img_eles = product_page.locator("xpath=//div[contains(@class, 'sku-item--image')]/img")
            for ixs in range(sku_img_eles.count()):
                try:
                    sku = sku_img_eles.nth(ixs)
                    img_url = sku.get_attribute("src")
                    # Upscale the thumbnail URL to an 800x800 variant
                    big_img_url = re.sub(r'[\d]{1,4}x[\d]{1,4}', "800x800",img_url)
                    gr = requests.get(big_img_url, timeout=(5,10))
                    imgurl_set.add(big_img_url)
                    image_content = gr.content
                    with BytesIO(image_content) as img_bytes:
                        img_hash_set.add(imagehash.average_hash(Image.open(img_bytes)))
                    with open(f"{picdir}/{ix+ixs+2}.png", "wb") as w:
                        w.write(image_content)
                except:
                    self.update_state_info(f"{self.work_dir[0]}:获取sku出错")

            self.update_state_info(f"{self.work_dir[0]}:获取specification")
            # Scrape specifications (title -> description pairs)
            specification_divs = product_page.locator("xpath=//div[contains(@class,'specification--prop')]")
            specifications_info = {}
            for sp_index in range(specification_divs.count()):
                sp_div = specification_divs.nth(sp_index)
                title = sp_div.locator('xpath=./div[contains(@class,"specification--title")]').locator('xpath=./span').inner_text()
                desc = sp_div.locator('xpath=./div[contains(@class,"specification--desc")]').locator('xpath=./span').inner_text()
                if title and desc:
                    specifications_info[title] = desc

            beautifual = BeautifulSoup(product_page.content(), 'lxml')
            # Scrape the description section
            description_wrapper = beautifual.find(name='div', id='nav-description')
            plain_html = str(description_wrapper)

            # Description images, skipping URL / perceptual-hash duplicates
            self.update_state_info(f"{self.work_dir[0]}:获取详情图")
            des_images = description_wrapper.find_all(name='img')
            for dix, des_img in enumerate(des_images):
                try:
                    des_img_url = des_img.get("src")
                    if des_img_url not in imgurl_set:
                        gr = requests.get(des_img_url, timeout=(5,10))
                        image_content = gr.content
                        with BytesIO(image_content) as img_bytes:
                            if imagehash.average_hash(Image.open(img_bytes)) in img_hash_set:
                                continue
                        with open(f"{picdir}/{dix + ix + ixs + 3}.png", "wb") as w:
                            w.write(image_content)
                except:
                    print("获取des图失败")
                    pass

            # Raw description HTML
            with open(f'{product_dir}/des_plain_html.txt','w', encoding='utf-8') as file:
                file.write(plain_html)

            # #转md
            # md = html2text.html2text(plain_html)
            # with open(f'{product_dir}/des_md.md','w', encoding='utf-8') as file:
            #     file.write(md)

            # Convert the description to plain text
            plain_text = html2text.HTML2Text().handle(plain_html)
            with open(f'{product_dir}/des_plain_text.txt','w', encoding='utf-8') as file:
                file.write(plain_text)

            video_locator = product_page.locator("xpath=//video[contains(@class,'video')]/source[1]")
            video_link = ''
            if video_locator.count() > 0:
                video_link = video_locator.first.get_attribute("src")
            # Assemble the product info record
            product_info = {
                'link':product_link,
                'price':price,
                'listing':listing,
                'specifications':specifications_info,
                'video_link' : video_link
            }

            with open(f'{product_dir}/product_info.json', 'w') as file:
                json.dump(product_info, file, indent=4)
            product_paths.append(str(product_dir))
        except Exception as e:
            traceback.print_exc()
            self.update_state_info(f"{self.work_dir[0]}:第{index+1}产品详情获取出错")
            try:
                product_page.screenshot(path=f"./download/{parent_index}{index}errorpic.jpg")
            except:
                pass

    def get_price(self, product_page):
        """Extract the product price text from the page.

        Tries several known price selectors first; if none match, falls
        back to concatenating the child texts of the price wrapper div.
        Returns "" when no price element is found.
        """
        price = ""
        price_path = [
            '//*[@id="root"]/div/div[1]/div/div[1]/div[1]/div[2]/div[1]/div/span[1]',
            '//*[@id="root"]/div/div[3]/div/div[1]/div[1]/div[2]/div[1]/div[2]/span[1]',
            "//span[contains(@class, 'price--currentPriceText')]",
            "//span[contains(@class,'price--originalText')]"
        ]
        paths = '|'.join(price_path)
        price_ele = product_page.locator(paths)
        if price_ele.count() > 0:
            price = price_ele.first.inner_text()
        if not price:
            # Fallback: stitch the price together from the wrapper's children
            last_prict_ele = product_page.locator("xpath=//div[@class='es--wrap--vZDQqfj notranslate']")
            if last_prict_ele.count() > 0:
                last_prict_ele = last_prict_ele.first
                child_elements = last_prict_ele.locator('xpath=./*')
                for index in range(child_elements.count()):
                    ce = child_elements.nth(index)
                    price += ce.inner_text()
        return price
    def mkdir(self, path):
        """Create ``path`` (and parents) if it does not already exist.

        NOTE(review): equivalent to os.makedirs(path, exist_ok=True);
        the pre-check is racy but harmless for this single-threaded use.
        """
        folder = os.path.exists(path)
        if not folder:
            os.makedirs(path)


