import logging
import logging.config

import logs
import math
import time
import os
import sys
import uuid
import confhelper
os.chdir(os.path.dirname(__file__))

from asyncio import InvalidStateError
from playwright.sync_api import Playwright, sync_playwright

import dbmapper
from entity import LinkMonResult, MonLinkBatch, MonLinkVersion, MonLink


# Configure logging from the shared project config; keep loggers that
# already-imported modules created alive.
logging.config.fileConfig("../logging.conf", disable_existing_loggers=False)
logger = logging.getLogger("tb")

# Browser screen/viewport: very tall so long shop listings fit in one
# full-page screenshot.  (Name kept as-is; the "wiew_size" typo is
# referenced by run() below.)
wiew_size = {"width": 1080, "height": 4240}

work_dir = confhelper.confdata().get("work_dir")
# Screenshot output root for Taobao ("tb") links.
linkpath_prefix = confhelper.confdata().get("linkpath_prefix")+"tb/"

picList = []   # accumulates screenshot file paths across the whole run
monLinks = []  # links to crawl; loaded from the DB at the bottom of the file

logger.info("sys.argv: {}".format(sys.argv))
# Run version tag, required as the first CLI argument; an IndexError here
# means the caller forgot to pass it.
version = sys.argv[1]
def run(playwright: Playwright) -> None:
    """Crawl every shop URL in the module-level ``monLinks`` list.

    For each monitored link: open the shop page, navigate to its
    "all items" listing (Taobao shops use several different page
    templates), page through every result page collecting item
    locators, then hand them to ``view_page`` for screenshots and DB
    persistence.  Runs headed with slow_mo so a human operator can
    clear captchas when they appear.
    """
    # Headed + slow_mo=3000: deliberately slow and visible so an
    # operator can solve Taobao's graphical captchas by hand.
    browser = playwright.firefox.launch(headless=False,slow_mo=3000)

    #context = browser.new_context(viewport=wiew_size,storage_state="tb_state.json")
    #context = browser.new_context(storage_state="tb_state.json")
    # Reuse the previously saved login session (tb_state.json) so we are
    # authenticated; the tall viewport captures long item listings.
    context = browser.new_context(screen=wiew_size, viewport=wiew_size,storage_state="tb_state.json")

    # Open new page
    page = context.new_page()

    # Go to https://shop64356781.taobao.com/
    #page.goto("https://shop64356781.taobao.com/")
    #page.mouse.wheel(0,10000)
    #page.goto("https://lz888888.taobao.com/category.htm?spm=a1z10.1-c-s.w5001-21305331213.10.27d24163Si2GnA&search=y&v=1&scene=taobao_shop")

    i = 0  # running item counter, shared across all links via view_page
    monLinks.reverse()  # process links in reverse DB order (mutates the global in place)
    for link in monLinks:
        url = link.url
        linkId = link.id

        # Log message: "start traversing" this shop link.
        logger.info("开始遍历 url:{},linkId:{}".format(url, linkId))
        page.goto(url)

        # Dismiss the "sufei" security dialog if it popped up.
        if page.locator(".sufei-dialog-close").count()> 0:
            page.locator(".sufei-dialog-close").click()

        # "没有找到相应的店铺信息" = "shop not found"; skip dead shops.
        if page.locator("text=没有找到相应的店铺信息").count() > 0:
            logger.error("无效店铺: {}".format(url))
            continue

        #
        # Shop templates differ: try each known way of reaching the
        # "all items" listing.  "搜本店" = search this shop,
        # "所有分类" = all categories, "全部宝贝" = all items.
        if page.locator(".J_SearchForm").locator("text=搜本店").count() > 0 :
            page.locator(".J_SearchForm").locator("text=搜本店").click()
        elif page.locator(".all-cats.popup-container").locator("text=所有分类").count() > 0 :
            page.locator(".all-cats.popup-container").locator("text=所有分类").click()
        elif page.locator("#searchHeader").locator("text=搜本店").count() > 0:
            # This template requires a non-empty query before searching.
            page.locator('#q').fill('s')
            page.locator("#searchHeader").locator("text=搜本店").click()
            if page.locator(".all-cats.popup-container").locator("text=所有分类").count() > 0:
                page.locator(".all-cats.popup-container").locator("text=所有分类").click()
        elif page.locator("text=全部宝贝").count() > 0 :
            page.locator("text=全部宝贝").click()

        # ".captcha-tips" marks a graphical captcha; poll every 5 s until
        # the operator clears it in the headed browser.
        while page.locator(".captcha-tips").count() >0:
            logger.info("全部宝贝 出现图形验证等待5秒")
            #send_email("spidetb","出现图形验证等待5秒")
            #time.sleep(5)
            page.wait_for_timeout(5000)
        if page.locator(".sufei-dialog-close").count()> 0:
            page.locator(".sufei-dialog-close").click()
        #with page.expect_navigation():

        # if page.locator(".all-cats.popup-container").locator("text=所有分类").count() > 0:
        #     if page.locator(".all-cats.popup-container").locator("text=所有分类").is_visible():
        #         page.locator(".all-cats.popup-container").locator("text=所有分类").click()

        # Pager text looks like "1/12"; the denominator is the total
        # page count.
        pageTotal = page.locator(".pagination.pagination-mini").locator(".page-info").text_content()
        pageNum = int(pageTotal.split("/")[1])
        logger.info("url {} pageTotal {} pagenum: {} ".format(url,pageTotal,pageNum))

        # Total item count displayed next to the search box (spaces stripped).
        goodSum = page.locator('xpath=//*[@id="shop-search-list"]/div/div[2]/span').text_content().replace(" ", "")
        # Upsert the per-link batch row: update the item count if a row
        # exists, otherwise insert a fresh batch.
        monLinkBatch = MonLinkBatch(linkId=linkId)
        linkBatchCount = dbmapper.MonLinkBatchMapper.count(monLinkBatch)
        if int(linkBatchCount) > 0:
            dbmapper.MonLinkBatchMapper.update(MonLinkBatch(goodSum=goodSum, linkId=linkId))
        else:
            monLinkBatchInsert = MonLinkBatch(goodSum=goodSum, linkId=linkId, status='0', linkUrl=url)
            dbmapper.MonLinkBatchMapper.insert(monLinkBatchInsert)

        goodLinks = []
        for p_num in range(1,pageNum+1):
            # NOTE(review): reload() runs before every page, including the
            # first — presumably to defeat lazy loading; confirm it does
            # not reset the pager back to page 1.
            page.reload()
            logger.info("第{}页".format(p_num))

            if(p_num > 1):
                # Poll until the "下一页" (next page) control appears.
                while page.locator(".pagination.pagination-mini").locator("text=下一页").count() == 0:
                    logger.info("下一页未加载出来，循环等待中")
                    # page.reload()
                    page.wait_for_timeout(5000)

                try:
                    page.locator(".pagination.pagination-mini").locator("text=下一页").click()
                except Exception as e:
                    logger.exception("next page traceback is:{}".format(e))
                    continue
            #print("第"+str(p_num)+"页 url size: "+str(len(pageLinks)))
            # Two alternative item-grid CSS classes across shop themes.
            pageLinks = page.locator(".item3line1").locator(".item").all()
            if len(pageLinks) == 0:
                pageLinks = page.locator(".item30line1").locator(".item").all()

            if len(pageLinks) > 0 :
                goodLinks.extend(pageLinks)
                logger.info("当前商品链接集合大小：{}".format(len(goodLinks)))
        # with open(url_txt_end, "a") as file:
        #     file.write(url)
        #     file.write("\n")
        # Screenshot every collected item and persist monitoring rows.
        if len(goodLinks) > 0:
            view_page(p_num, i, page, goodLinks, picList, url, linkId)
        logger.info("结束遍历 {}".format(url))

        # Stamp this link with the current run's version tag.
        monLinkUpdate = MonLink(version=version, url=url)
        dbmapper.MonLinkMapper.update(monLinkUpdate)
    # Close page
    page.close()

    # ---------------------
    context.close()
    browser.close()


def view_page(p_num:int,i:int,page,pageLinks,picList, linkUrl, linkId):
    """Open each item in a popup, screenshot it, and persist the results.

    For every item locator in ``pageLinks``: click it on the listing
    ``page``, wait for the popup, wait out any graphical captcha, save a
    full-page screenshot under ``linkpath_prefix`` and upsert
    LinkMonResult / MonLinkVersion rows through ``dbmapper``.

    p_num     -- listing page number (used in screenshot file names)
    i         -- running item counter carried in from the caller
    page      -- shop listing page whose clicks spawn the popups
    pageLinks -- item locators collected from the listing
    picList   -- module-level list accumulating screenshot paths
    linkUrl   -- shop URL the items belong to
    linkId    -- DB id of the monitored link
    """
    for p in pageLinks:
        i = i + 1
        j = 0
        # Progress log: "crawl progress i/total".
        logger.info("网页爬取进度:{}/{}".format(i, len(pageLinks)))

        # Back off every 20 items to look less like a bot.
        if i % 20 == 0:
            logger.info("休眠一段时间。。。。")
            time.sleep(10)

        # page1 stays None if the click never opens a popup; the generic
        # handler below must not assume it exists.
        page1 = None
        try:
            with page.expect_popup(timeout=60000) as popup_info:
                p.first.click()
                logger.info("enabled: {}, visible: {}, url: {}".format(p.is_enabled(),p.is_visible(),""))
            page1 = popup_info.value

            # A "Sorry" page is a graphical captcha; poll every 5 s until
            # the operator clears it.
            while page1.locator("text=Sorry").count() >0:
                logger.info("出现图形验证等待5秒")
                page1.wait_for_timeout(5000)

            # "宝贝详情" (item detail) marks a real product page.
            if page1.locator("text=宝贝详情").count() > 0 :

                if page1.locator("text=Sorry").count() == 0:
                    # File name: <page>-<counter>-0 plus the first 30 chars
                    # of the title with path-hostile characters removed.
                    pic = linkpath_prefix +str(p_num)+"-"+str(i)+"-"+str(j)+page1.title().replace("*","").replace("/","#").replace(" ","")[0:30]+".png"
                    page1.wait_for_timeout(2000)
                    # Dismiss the "baxia" anti-bot dialog if present.
                    if page1.locator(".baxia-dialog-close").count()> 0:
                        page1.locator(".baxia-dialog-close").click()
                    page1.screenshot(path=pic,full_page=True)
                    picList.append(pic)

                    # Upsert the monitoring result row; pic path is stored
                    # relative to work_dir under a "/profile" prefix.
                    linkMonResult = LinkMonResult(str(uuid.uuid4().int), linkUrl, page1.url,
                                                  "/profile" + pic.replace(work_dir, ""), linkId=linkId)
                    count = dbmapper.LinkMonResultMapper.count(linkMonResult)
                    if int(count) > 0:
                        linkMonResult.status = 0
                        linkMonResult.ocrStr = ""
                        dbmapper.LinkMonResultMapper.update(linkMonResult)
                    else:
                        dbmapper.LinkMonResultMapper.insert(linkMonResult)

                    # Record that this good was seen in the current version.
                    linkVersion = MonLinkVersion(linkId=linkId, linkUrl=linkUrl, goodUrl=page1.url, version=version)
                    lvCount = dbmapper.MonLinkVersionMapper.count(linkVersion)
                    if int(lvCount) <= 0:
                        dbmapper.MonLinkVersionMapper.insert(linkVersion)
                else:
                    # Captcha reappeared after the wait loop: sleep 60 s.
                    logger.info("出现图形验证进入60秒睡眠")
                    page1.wait_for_timeout(60000)
            else:
                # Small captcha dialog: poll until the detail page shows.
                while page1.locator("text=宝贝详情").count() == 0:
                    logger.info("出现图形验证小弹框等待5秒")
                    page1.wait_for_timeout(5000)

            page1.close()
        except UnboundLocalError as e:
            logger.exception("UnboundLocalError traceback is:{}".format(e))
        except InvalidStateError as e:
            logger.exception("InvalidStateError traceback is:{}".format(e))
        except TimeoutError as e:
            # NOTE(review): this is the builtin TimeoutError; Playwright's
            # TimeoutError does not subclass it, so popup timeouts fall
            # through to the generic handler below — confirm intent.
            logger.exception("TimeoutError traceback is:{}".format(e))
        except Exception as e:
            # Bug fix: page1 may never have been assigned (e.g. the popup
            # never opened), so guard before touching it — previously this
            # handler itself raised UnboundLocalError and aborted the link.
            failed_url = page1.url if page1 is not None else "<no popup>"
            logger.info("exception {} url:{}".format(i,failed_url))
            logger.exception("exception traceback is:{}".format(e))
            if page1 is not None:
                page1.close()

# Work relative to the script's own folder so paths like "../logging.conf"
# and "tb_state.json" resolve regardless of the launch directory.
# abspath() guards against os.path.dirname(__file__) being "" (which would
# make os.chdir raise) when the script is started from its own directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))

# Load the Taobao ("tb") links still pending for this run version from the DB.
loadedLinks = dbmapper.MonLinkMapper.list_by_typea('tb', version)
if len(loadedLinks) > 0:
    monLinks = loadedLinks

# "待监测的url" = URLs awaiting monitoring.
logger.info("待监测的url %s",[(link.id,link.url) for link in monLinks])
if len(monLinks) > 0:
    with sync_playwright() as playwright:
        run(playwright)