import json
import os
import re
import traceback
import uuid
from datetime import datetime


from domain.consts import EXPRESS_SEARCH_TEMPLATE
from domain.mapper.spider_result import SpiderResult
from domain.mapper.spider_task import SpiderTask
from eshoputils.mini_utils import MinioDB, ObjectType
from eshoputils.mysql_util import MySqlUtil
from eshoputils.utils import mkdir
from eshoputils.watermark import WaterMarkUtil
from graph.states import EshopAssistantState
from spider.broswerless_express_spider import BroswerExpressShopSpider

def update_state_info(state, config, info:str):
    """Append a progress message to the task log and persist it into the graph state."""
    # Imported lazily to avoid a circular import with graph.graph.
    from graph.graph import EshopAssistantGraph

    task_log = state["task_info"]
    task_log.append(info)
    EshopAssistantGraph.graph.update_state(config=config, values={"task_info": task_log})

def broswerless_express_node(state:EshopAssistantState, config):
    """
    速卖通爬虫节点
    :param state:
    :param config:
    :return:
    """
    state["task_info"].append("进入速卖通爬取")
    run_info = config['run_info']
    res = MySqlUtil.execute_sql(f"select * from spider_param where user_id ='" + config['user_id'] + "'")[0]
    shop_urls = res[1].split("\n")
    keywords = res[2].split("\n")
    product_urls = res[3].split("\n")
    shop_and_keywords = json.loads(res[5])
    work_dir = config['work_dir']
    urls = []
    for shop_url_words in shop_and_keywords:
        su = shop_url_words["shop_url"].strip()
        if su:
            sks_str = shop_url_words["shop_keywords"]
            sks = sks_str.split("\n")
            for sk in sks:
                sk = re.sub(r'\s+', ' ', sk).strip()
                if sk:
                    sub_sks = sk.split(" ")
                    format_sk = "+".join(sub_sks)
                    if "?" in su:
                        if "&SearchText=" in su:
                            urls.append((re.sub(r"SearchText=[\d\w\+]+\&", f"SearchText={format_sk}&", su), "shop_keyword", sk))
                        else:
                            urls.append((su + f"&SearchText={format_sk}", "shop_keyword", sk))
                    else:
                        urls.append((su + f"?SearchText={format_sk}", "shop_keyword", sk))
    if run_info.shop:
        for shop_url in shop_urls:
            if shop_url:
                urls.append((shop_url.strip(), "shop"))
    if run_info.keyword:
        for keyword in keywords:
            if keyword:
                keyword = re.sub(r'\s+', ' ', keyword).strip()
                sub_ks = keyword.split(" ")
                format_keyword = "-".join(sub_ks)
                urls.append((EXPRESS_SEARCH_TEMPLATE.format(keyword=format_keyword), keyword))
    for product_url in product_urls:
        if product_url:
            urls.append((product_url, "product"))
    express_spider = BroswerExpressShopSpider(state, config, urls, work_dir)
    all_product_path = express_spider.enter_shop()
    return {'product_path': all_product_path}

# def express_spider_node(state:EshopAssistantState, config):
#     """
#     速卖通爬虫节点
#     :param state:
#     :param config:
#     :return:
#     """
#     state["task_info"].append("进入速卖通爬取")
#     run_info = config['run_info']
#     work_dir = config['work_dir']
#     res = MySqlUtil.execute_sql(f"select * from spider_param where user_id ='"+config['user_id']+"'")[0]
#     shop_urls = res[1].split("\n")
#     keywords = res[2].split("\n")
#     product_urls = res[3].split("\n")
#     urls = []
#     if run_info.shop:
#         for shop_url in shop_urls:
#             if shop_url:
#                 urls.append((shop_url.strip(), "shop"))
#     if run_info.keyword:
#         for keyword in keywords:
#             if keyword:
#                 keyword = re.sub(r'\s+', ' ', keyword)
#                 sub_ks = keyword.split(" ")
#                 format_keyword = "-".join(sub_ks)
#                 urls.append((EXPRESS_SEARCH_TEMPLATE.format(keyword=format_keyword), keyword.strip()))
#     for product_url in product_urls:
#         if product_url:
#             urls.append((product_url, "product"))
#     express_spider = ExpressShopSpider(state, config, urls, work_dir)
#     try:
#         all_product_path = express_spider.enter_shop()
#     except Exception as e:
#         raise e
#     finally:
#         try:
#             express_spider.browser.quit()
#         except:
#             pass
#     return {'product_path':all_product_path}


def shopee_spider_node(state:EshopAssistantState, config):
    """
    Shopee (虾皮) spider node.

    Currently a stub: performs no crawling and yields no product paths.

    :param state: graph state (unused)
    :param config: run configuration (unused)
    :return: dict with an empty ``product_path`` list
    """
    return {"product_path": []}

def remove_watermark_node(state:EshopAssistantState, config):
    """
    Watermark-removal node: for every crawled product directory, strip the
    watermark from each picture under ``oripic`` and store the result under
    ``no_woatermark``.

    :param state: graph state; ``product_path`` lists product directories
    :param config: run configuration; ``work_dir[0]`` tags log messages
    :return: dict with an empty ``product_path`` list
    """
    work_tag = config['work_dir'][0]
    update_state_info(state, config, f"{work_tag}:进入去除水印节点")
    for product_path in state['product_path']:
        cleaned_dir = os.path.join(product_path, "no_woatermark")
        mkdir(cleaned_dir)
        source_dir = f"{product_path}/oripic"
        for pic_name in os.listdir(source_dir):
            update_state_info(state, config, f"{work_tag}:正在去除{pic_name}图片的水印")
            source_pic = os.path.join(source_dir, pic_name)
            cleaned_pic = os.path.join(cleaned_dir, pic_name)
            try:
                # Strip the watermark via the external model API.
                WaterMarkUtil.remove_watermark_byapi(source_pic, cleaned_pic)
            except Exception as e:
                # Best-effort fallback: copy the original picture unchanged so
                # downstream steps still find a file under no_woatermark.
                update_state_info(state, config, f"{work_tag}:水印模型出错{str(e)}")
                with open(source_pic, 'rb') as src, open(cleaned_pic, 'wb') as dst:
                    dst.write(src.read())
    return {'product_path': []}

def add_watermark_node(state:EshopAssistantState, config):
    """
    Watermark-application node.

    Currently disabled: the previous implementation (copying each picture from
    ``no_woatermark`` into an ``add_woatermark`` directory and optionally
    stamping it via ``WaterMarkUtil.add_watermark``) is switched off, so this
    node is a no-op that just advances the graph.

    :param state: graph state (unused)
    :param config: run configuration (unused)
    :return: dict with an empty ``product_path`` list
    """
    return {'product_path': []}

def extract_info(state:EshopAssistantState, config):
    """
    Extraction node: pull structured fields out of each product's description
    and specifications via an LLM chain.

    Currently disabled — the whole chain below is commented out, so this node
    is a no-op that returns an empty ``product_path`` list.

    :param state: graph state
    :param config: run configuration
    :return: dict with an empty ``product_path`` list
    """
    # NOTE(review): disabled implementation kept below for reference; it
    # classified the product type first and only extracted details for phone
    # cases / screen protectors ("other" got an empty extract_info.json).
    # update_state_info(state, config, f"{config['work_dir'][0]}:进入信息提取环节")
    # all_product_path = state['product_path']
    # for product_path in all_product_path:
        # product_path = os.path.normpath(product_path)
        # with open(os.path.join(product_path, "des_plain_text.txt"), "r", encoding="utf-8") as file:
        #     description = file.read()
        # with open(os.path.join(product_path, "product_info.json"), "r") as file:
        #     product_info = json.load(file)
        # # First decide what kind of product this is.
        # product_type_chain = product_type_prompt | chat_model | StrOutputParser()
        # product_type = product_type_chain.invoke({
        #     "title" : product_info["listing"]
        # })
        # if product_type != "other":
        #     # Phone cases and screen protectors have extractable structured
        #     # info; other product types have no JSON info to extract.
        #     chain = extract_info_prompt | chat_model | JsonOutputParser()
        #     extract_info = chain.invoke({
        #         "description": description,
        #         "specifications": json.dumps(product_info['specifications'], ensure_ascii=False),
        #         "product_type":product_type
        #     })
        #     with open(os.path.join(product_path, "extract_info.json"), "w") as file:
        #         json.dump(extract_info, file, indent=4)
        # else:
        # with open(os.path.join(product_path, "extract_info.json"), "w") as file:
        #     json.dump({}, file, indent=4)
    return {'product_path':[]}

def listing_change(state:EshopAssistantState, config):
    """
    Listing-transformation node.

    Only logs progress for now; no transformation is performed yet.

    :param state: graph state
    :param config: run configuration; ``work_dir[0]`` tags the log message
    :return: dict with an empty ``product_path`` list
    """
    # NOTE(review): "缓解" in the log message is likely a typo for "环节";
    # kept byte-for-byte to preserve behavior.
    update_state_info(state, config, f"{config['work_dir'][0]}:进入listing变形缓解")
    return {'product_path': []}

def archive_node(state:EshopAssistantState, config):
    """
    Archive node: upload each product's pictures to MinIO, persist the product
    record via SpiderResult, then mark the spider task as finished.

    Expects each ``state['product_path']`` entry to follow the layout
    ``/<crawl_date>/<task>/<platform>/<type>/<dirname>/...`` (see the
    index-based split below). A failure on one product is logged and the rest
    are still archived.

    :param state: graph state; ``product_path`` lists product directories
    :param config: run configuration (``configurable.thread_id``, ``run_info``,
        ``work_dir``)
    :return: None
    """
    update_state_info(state, config, f"{config['work_dir'][0]}:进入归档环节")
    # NOTE(review): MinIO credentials are hard-coded here — move them to
    # configuration / secrets management.
    minio = MinioDB({
        "endpoint": "test.minio.epochingai.com:9001",
        "access_key": "admin",
        "secret_key": "abdFWBeffNk1Dwe",
        "secure": False,
        "bucket": "eshopassistant"
    })
    cloud_prefix = "http://test.minio.epochingai.com:9001/eshopassistant"

    task_id = config['configurable']['thread_id']

    # Path layout: /2022-07-08/task_id/plateform/type/dirname/productpath
    link_set = set()
    print(f"{config['work_dir'][0]}:最终归档一共：{len(state['product_path'])}个产品")
    update_state_info(state, config, f"{config['work_dir'][0]}:最终归档一共：{len(state['product_path'])}个产品")
    for product_path in state['product_path']:
        try:
            with open(os.path.join(product_path, "des_plain_text.txt"), "r", encoding="utf-8") as file:
                description = file.read()
            with open(os.path.join(product_path, "product_info.json"), "r") as file:
                product_info = json.load(file)
            if config["run_info"].distinct:
                # Deduplicate by product link within this archiving run.
                if product_info["link"] in link_set:
                    continue
                link_set.add(product_info["link"])
            path_infos = product_path.split("/")
            crawl_date = path_infos[1]
            platform_type = path_infos[3]
            crawl_type = path_infos[4]  # was `type`, which shadowed the builtin
            if crawl_type == "店铺-关键字":
                # shop+keyword crawls nest one level deeper: shop/keyword
                dir_name = path_infos[5] + "/" + path_infos[6]
            else:
                dir_name = path_infos[5]
            product_id = uuid.uuid4().hex
            pic_paths = []
            # Upload the original and the watermark-free picture pairwise.
            oripic_path = os.path.join(product_path, "oripic")
            no_woatermark_path = os.path.join(product_path, "no_woatermark")
            for pic_name in os.listdir(oripic_path):
                oripic = os.path.join(oripic_path, pic_name)
                nowaterpic = os.path.join(no_woatermark_path, pic_name)
                oripic_upload_path = f"pic/{crawl_date}/{task_id}/{product_id}/oripic/{pic_name}"
                nowaterpic_upload_path = f"pic/{crawl_date}/{task_id}/{product_id}/nowatermark/{pic_name}"

                minio.put_object(oripic_upload_path, oripic, ObjectType.file)
                minio.put_object(nowaterpic_upload_path, nowaterpic, ObjectType.file)
                pic_paths.append([f"{cloud_prefix}/{oripic_upload_path}", f"{cloud_prefix}/{nowaterpic_upload_path}"])

            SpiderResult.insert_product_result(**{
                "id":product_id,
                "task_id":task_id,
                "platform":platform_type,
                "crawl_type":crawl_type,
                "link":product_info["link"],
                "video_link":product_info.get("video_link", ""),
                "listing":product_info["listing"],
                "price":product_info["price"],
                "specifications":product_info["specifications"],
                "description":description,
                "crawl_date":crawl_date,
                "pic_paths":pic_paths,
                "target_name" : dir_name
            })
        except Exception:
            # Was a bare `except:` — narrowed so KeyboardInterrupt/SystemExit
            # propagate; log and continue with the next product.
            traceback.print_exc()
    update_state_info(state, config, f"{config['work_dir'][0]}:任务结束！！！！！！！")
    task_info = "<br>".join(state["task_info"])
    SpiderTask.update_param_by_id(task_id, {
        'task_state':'finish',
        'task_info':task_info,
        'finish_time':datetime.now()
    })





