import datetime
import json
import os
import re
import traceback
import uuid

import requests
from bs4 import BeautifulSoup
import html2text
from playwright.sync_api import sync_playwright

from eshoputils.http_client import HttpClient
from eshoputils.mysql_util import MySqlUtil
from graph.states import EshopAssistantState

def handle_route(route, request):
    """Playwright route handler: block image downloads, pass everything else.

    Images are the bulk of page weight and are fetched separately later, so
    aborting them here speeds up page loads considerably.
    """
    if request.resource_type != "image":
        route.continue_()
    else:
        route.abort()

class BroswerExpressShopSpiderV2:
    """Playwright-based spider for AliExpress shop / keyword / product URLs.

    Connects to a remote Browserless Chromium instance, collects product links
    from listing pages, then scrapes each product page (title, price, images,
    specifications, description) into a per-product directory under download/.

    NOTE(review): the class name contains a typo ("Broswer") but is kept
    unchanged for backward compatibility with existing callers.
    NOTE(review): the Browserless API token is hard-coded in the connect URLs;
    it should be moved into configuration / environment.
    """

    def __init__(self, state: EshopAssistantState, config, urls):
        # urls: iterable of (url, kind) pairs; kind is "shop", "product",
        # or anything else (treated as a search keyword) — see enter_shop().
        self.urls = urls
        self.state = state
        self.config = config

    def update_state_info(self, info: str):
        """Append a progress message to task_info and push it to the graph state."""
        # Imported locally to avoid a circular import at module load time.
        from graph.graph import EshopAssistantGraph
        self.state["task_info"].append(info)
        EshopAssistantGraph.graph.update_state(
            config=self.config,
            values={"task_info": self.state["task_info"]},
        )

    def get_unblock_endpoint(self, url):
        """Request an unblocked browser endpoint for *url* from Browserless.

        Returns the raw response from HttpClient.post (expected to contain a
        "browserWSEndpoint" key). Currently unused by enter_shop().
        """
        resp = HttpClient.post(url="https://production-sfo.browserless.io/chromium/unblock?token=QcDUw0Uu5aw59kedfea74fe0859ac73025650e8c68", body={
            "url": url,
            "browserWSEndpoint": True,
            "cookies": True,
            "content": False,
            "screenshot": False,
            "ttl": 30000
        })
        return resp

    def enter_shop(self):
        """Crawl all configured URLs and scrape every discovered product.

        Phase 1 walks listing pages to collect product links (direct
        "product" URLs skip this phase); phase 2 opens each product page and
        delegates to get_product_info().

        Returns:
            list[str]: output directory paths, one per scraped product.
        """
        all_product_link = []  # items: (source_url_tuple, product_url, url_index, shop_name)
        for ui, url in enumerate(self.urls):
            if url[1] == "product":
                # Direct product link: no listing page to walk, shop name unknown.
                all_product_link.append((url, url[0], ui, None))
            else:
                print(f"开始爬取url：{url[0]}")
                self.update_state_info(f"开始爬取url：{url[0]}")
                with sync_playwright() as p:
                    browser = p.chromium.connect('wss://production-sfo.browserless.io/chrome/playwright?token=QcDUw0Uu5aw59kedfea74fe0859ac73025650e8c68&blockAds=true&launch={"stealth":true,"headless": false}')
                    context = browser.new_context(viewport={"width": 3840, "height": 2160})
                    context.route("**/*", handle_route)  # drop image requests
                    context.set_default_timeout(3000)
                    list_page = context.new_page()
                    list_page.goto(url[0], timeout=0, wait_until="domcontentloaded")
                    list_page.wait_for_timeout(1000)
                    # Reload and scroll to trigger lazy-loaded product cards.
                    list_page.reload(timeout=0, wait_until="domcontentloaded")
                    list_page.wait_for_timeout(2000)
                    list_page.mouse.wheel(0, 1000)
                    list_page.wait_for_timeout(1000)
                    list_page.mouse.wheel(0, -1000)
                    list_page.wait_for_timeout(1000)
                    list_page.screenshot(path=f"./download/{ui}list.jpg")
                    # Two markers that the listing rendered: shop pages show an
                    # "Orders" span, search pages show the "Best Match" sorter.
                    flag = list_page.locator(selector='xpath=//span[text()="Orders"]')
                    flag2 = list_page.locator(selector="xpath=//span[@class='topRefine2023--text--39fcGoQ' and text()='Best Match']")
                    if flag.count() <= 0 and flag2.count() <= 0:
                        print(f'列表页没有展示,跳过此链接{url[0]}')
                        self.update_state_info(f'列表页没有展示,跳过此链接{url[0]}')
                        # BUGFIX: close the remote context before skipping —
                        # the original `continue` leaked it.
                        context.close()
                        continue
                    # Switch to gallery view when available (uniform card markup).
                    galler_btn = list_page.locator("xpath=//span[text()='Gallery']")
                    if galler_btn.count() > 0:
                        galler_btn.click()
                        list_page.wait_for_timeout(1000)
                    if 'shop' == url[1]:
                        latest_products = list_page.locator(selector="xpath=//a[contains(@ae_button_type,'XRcmd-ItemClick')]")
                    else:
                        latest_products = list_page.locator("xpath=//a[@class='multi--container--1UZxxHY cards--card--3PJxwBm search-card-item']")
                    self.update_state_info(f'第一页一共有{latest_products.count()}个产品')
                    print(f'第一页一共有{latest_products.count()}个产品加入产品列表')
                    shop_name = None
                    if 'shop' == url[1]:
                        shop_name = list_page.locator("xpath=//a[contains(@data-href,'aliexpress.com/store')]/span").first.inner_text()
                    # Cap at run_info.num products (slice also tolerates num=None).
                    for index in range(latest_products.count())[0:self.config["run_info"].num]:
                        latest_product = latest_products.nth(index)
                        product_link = latest_product.get_attribute("href")
                        if self.config["run_info"].history_distinct:
                            # Skip products already crawled in previous runs.
                            # NOTE(review): string-built SQL with a scraped href —
                            # injection risk; switch to a parameterized query once
                            # MySqlUtil supports bound parameters.
                            res = MySqlUtil.execute_sql(
                                f"select count(1) from spider_product_result where link = '{product_link}'").__next__()
                            if res[0] > 0:
                                continue
                        # Card hrefs are scheme-less; prefix them explicitly.
                        all_product_link.append((url, "https://" + product_link, ui, shop_name))
                    context.close()
        task_id = self.config['configurable']['thread_id']
        all_product_path = []
        for pi, product_link in enumerate(all_product_link):
            print(f"开始获取第{pi + 1}个商品")
            self.update_state_info(f"开始获取第{pi + 1}个商品")
            with sync_playwright() as p:
                browser = p.chromium.connect('wss://production-sfo.browserless.io/chrome/playwright?token=QcDUw0Uu5aw59kedfea74fe0859ac73025650e8c68&blockAds=true&launch={"stealth":true,"headless": false}')
                context = browser.new_context(viewport={"width": 3840, "height": 2160})
                context.route("**/*", handle_route)
                context.set_default_timeout(2000)
                product_page = context.new_page()
                product_page.goto(product_link[1], timeout=0, wait_until="domcontentloaded")
                print("获取商品详情")
                # BUGFIX: pass this product's own source-url tuple
                # (product_link[0]) instead of the leaked loop variable `url`
                # from the listing loop above, which held the LAST listing url
                # and misclassified products from mixed url lists.
                self.get_product_info(product_link[0], task_id, all_product_path, product_link[1], pi, product_link[3], product_page)
                context.close()
        print("所有产品爬取完成")
        return all_product_path

    def get_product_info(self, url, task_id, product_paths, product_link, index, shop_name, product_page):
        """Scrape one already-opened product page and persist its data.

        Writes gallery/SKU images, raw+plain-text description, and a
        product_info.json into a per-product directory, then appends that
        directory to *product_paths*. Any failure is logged and the page HTML
        dumped to ./download/errorpage.txt for post-mortem debugging.

        Args:
            url: (source_url, kind) tuple this product came from.
            task_id: graph thread id, used in the output path.
            product_paths: accumulator list, mutated in place.
            product_link: absolute product page URL.
            index: zero-based crawl position of this product.
            shop_name: shop name for shop crawls, else None.
            product_page: a Playwright page already navigated to the product.
        """
        try:
            product_page.reload(timeout=0, wait_until="domcontentloaded")
            current_date = datetime.datetime.now().strftime("%Y-%m-%d")
            product_page.wait_for_timeout(1000)
            # Wiggle the mouse to trigger lazy rendering.
            product_page.mouse.move(0, 500)
            product_page.wait_for_timeout(500)
            product_page.mouse.move(0, -500)
            # Expand every collapsed "View more" section before reading the DOM.
            more_btns = product_page.locator('xpath=//span[contains(text(),"View more")]')
            try:
                for mi in range(more_btns.count()):
                    more_btns.nth(mi).click()
                    product_page.wait_for_timeout(500)
            except Exception:
                traceback.print_exc()
                print("more button 出错")
                self.update_state_info("more button 没找到")
            product_page.screenshot(path=f"./download/{index}product.jpg")
            beautifual = BeautifulSoup(product_page.content(), 'lxml')

            # Classify the source url kind for the output directory layout.
            # (Renamed from `type`, which shadowed the builtin.)
            if url[1] == 'shop':
                link_type = "店铺"
            elif url[1] == 'product':
                link_type = '商品链接'
            else:
                link_type = '关键词'

            dir_name = None
            if link_type == '店铺':
                dir_name = shop_name
            elif link_type == '关键词':
                dir_name = url[1]

            if "aliexpress" in url[0]:
                platform_type = "express（速卖通平台）"
            else:
                platform_type = "shopee（虾皮平台）"

            # Build the per-product output directory; a uuid suffix keeps
            # directories unique across re-runs of the same product.
            if dir_name:
                product_dir = f'download/{current_date}/{task_id}/{platform_type}/{link_type}/{dir_name}/{index+1}_{uuid.uuid4().hex}'
            else:
                product_dir = f'download/{current_date}/{task_id}/{platform_type}/{link_type}/所有商品/{uuid.uuid4().hex}'

            self.mkdir(product_dir)
            picdir = f'{product_dir}/oripic'
            self.mkdir(picdir)

            # Listing title.
            listing = product_page.locator('xpath=//h1[@data-pl="product-title"]').inner_text()
            print(f"第{index+1}个商品listing：{listing}")
            self.update_state_info(f"第{index+1}个商品listing：{listing}")

            # Price.
            price = self.get_price(product_page)
            print(f"第{index+1}个商品price：{price}")
            self.update_state_info(f"第{index+1}个商品price：{price}")

            # Gallery images: pulled from the imagePathList JSON array that
            # AliExpress embeds in the page source.
            main_img_count = 0
            res = re.findall(r'imagePathList":\[[\d\w\-.:/",]*\]', product_page.content())
            if res:
                good_img_urls = json.loads(res[0][15:])  # strip the 'imagePathList":' prefix
                main_img_count = len(good_img_urls)
                for gi, good_img_url in enumerate(good_img_urls):
                    # timeout so a dead image host cannot hang the whole crawl
                    gr = requests.get(good_img_url, stream=True, timeout=30)
                    with open(f"{picdir}/{gi+1}_{uuid.uuid4().hex}.png", "wb") as w:
                        w.write(gr.content)
            else:
                # BUGFIX: original referenced self.browser.page_source, which
                # does not exist on this class (Selenium leftover) and raised
                # AttributeError instead of logging.
                print(f'第{index+1}个商品没有图片：网页源码：{product_page.content()}')

            # SKU variant images: upscale the thumbnail URL to 800x800.
            sku_img_eles = product_page.locator("xpath=//div[contains(@class, 'sku-item--image')]/img")
            for ixs in range(sku_img_eles.count()):
                try:
                    sku = sku_img_eles.nth(ixs)
                    img_url = sku.get_attribute("src")
                    big_img_url = re.sub(r'[\d]{1,4}x[\d]{1,4}', "800x800", img_url)
                    gr = requests.get(big_img_url, stream=True, timeout=30)
                    # BUGFIX: original numbered files with ix+ixs+1 where ix was
                    # the last gallery-loop index — a NameError when the page
                    # had no gallery images. Number after the gallery count.
                    with open(f"{picdir}/{main_img_count+ixs+1}_{uuid.uuid4().hex}.png", "wb") as w:
                        w.write(gr.content)
                except Exception:
                    traceback.print_exc()
                    print("获取sku出错")
                    self.update_state_info("获取sku出错")

            # Specification key/value pairs.
            specification_divs = product_page.locator("xpath=//div[contains(@class,'specification--prop')]")
            specifications_info = {}
            for sp_index in range(specification_divs.count()):
                sp_div = specification_divs.nth(sp_index)
                title = sp_div.locator('xpath=./div[contains(@class,"specification--title")]').locator('xpath=./span').inner_text()
                desc = sp_div.locator('xpath=./div[contains(@class,"specification--desc")]').locator('xpath=./span').inner_text()
                if title and desc:
                    specifications_info[title] = desc

            # Description: keep both the raw HTML and a plain-text rendering.
            description_wrapper = beautifual.find(name='div', id='nav-description')
            plain_html = str(description_wrapper)

            with open(f'{product_dir}/des_plain_html.txt', 'w', encoding='utf-8') as file:
                file.write(plain_html)

            plain_text = html2text.HTML2Text().handle(plain_html)
            with open(f'{product_dir}/des_plain_text.txt', 'w', encoding='utf-8') as file:
                file.write(plain_text)

            product_info = {
                'link': product_link,
                'price': price,
                'listing': listing,
                'specifications': specifications_info
            }

            # utf-8 + ensure_ascii=False keeps Chinese spec text readable in
            # the JSON file instead of \uXXXX escapes.
            with open(f'{product_dir}/product_info.json', 'w', encoding='utf-8') as file:
                json.dump(product_info, file, indent=4, ensure_ascii=False)
            product_paths.append(str(product_dir))
        except Exception:
            traceback.print_exc()
            print(f"{product_link}产品详情获取出错")
            # Best-effort debug dump; must not mask the original failure.
            try:
                with open("./download/errorpage.txt", "w", encoding="utf-8") as file:
                    file.write(product_page.content())
            except Exception:
                traceback.print_exc()

    def get_price(self, product_page):
        """Return the displayed price text, trying several known selectors.

        Falls back to concatenating the child elements of the price wrapper
        div when no direct selector matches; returns "" when nothing is found.
        """
        price = ""
        price_path = [
            '//*[@id="root"]/div/div[1]/div/div[1]/div[1]/div[2]/div[1]/div/span[1]',
            '//*[@id="root"]/div/div[3]/div/div[1]/div[1]/div[2]/div[1]/div[2]/span[1]',
            "//span[contains(@class, 'price--currentPriceText')]",
            "//span[contains(@class,'price--originalText')]"
        ]
        # Playwright auto-detects '//'-prefixed selectors as XPath; '|' unions them.
        paths = '|'.join(price_path)
        price_ele = product_page.locator(paths)
        if price_ele.count() > 0:
            price = price_ele.first.inner_text()
        if not price:
            # Fallback: price rendered as several sibling spans inside a wrapper.
            last_prict_ele = product_page.locator("xpath=//div[@class='es--wrap--vZDQqfj notranslate']")
            if last_prict_ele.count() > 0:
                last_prict_ele = last_prict_ele.first
                child_elements = last_prict_ele.locator('xpath=./*')
                for ci in range(child_elements.count()):
                    price += child_elements.nth(ci).inner_text()
        return price

    def mkdir(self, path):
        """Create *path* (including parents) if it does not already exist."""
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() + os.makedirs() pattern.
        os.makedirs(path, exist_ok=True)

