import pandas as pd
import os
import xpinyin
import json
from urllib.parse import urlsplit, parse_qsl
import re
from concurrent.futures import ThreadPoolExecutor

# Project root (one directory above this script); currently unused below.
pwd = os.path.dirname(os.path.dirname(__file__))
# Bootstrap Django so the goods models can be imported by this standalone script.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shop.settings')
import django

django.setup()

import snowflake.client
from bs4 import BeautifulSoup
import requests

# Desktop Chrome user-agent so JD serves the normal item pages.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36'
}


def get_snowflake_uuid():
    """Fetch a fresh snowflake GUID from the snowflake id service."""
    return snowflake.client.get_guid()


def get_s(con):
    """Return the pinyin initials of *con* (e.g. a Chinese brand name).

    The original wrapped the call in a one-element list comprehension
    (``[f(a) for a in [con]][0]``), which is just ``f(con)``.
    """
    return xpinyin.Pinyin().get_initials(con, '')


def get_name(rep):
    """Extract the product name embedded as ``name: '...'`` in a JD item page.

    *rep* is a requests response for the item page; raises IndexError when the
    marker is absent.
    """
    html = rep.content.decode()
    names = re.findall(r"name: '(.*?)'", html, re.MULTILINE | re.DOTALL)
    return names[0]


def get_info(rep, sid):
    """Pull the colorSize attribute value for sku *sid* from a JD item page.

    The page embeds ``colorSize: [{...}, ...]``; the entry whose skuId matches
    is looked up and its second field (the attribute, e.g. '规格' or '产品') is
    returned.  On any failure the sku id is appended to cosme001.txt and ''
    is returned.
    """
    try:
        text = rep.content.decode()
        raw = re.findall(r"colorSize: \[(.*?)\], ", text, re.MULTILINE | re.DOTALL)[0]
        entries = json.loads('[' + raw + ']')
        # the attribute column name is taken from the first entry's second key
        attr_key = list(entries[0].keys())[1]
        matched = [e for e in entries if e['skuId'] == int(sid)][0]
        return matched[attr_key]
    except Exception:
        with open('cosme001.txt', 'a+', encoding='utf-8') as f:
            f.write(str(sid) + '\n')
        return ''


def get_content(sid, mid):
    """Download the JD description HTML for a sku and activate its lazy images."""
    endpoint = (
        f'https://cd.jd.com/description/channel?skuId={sid}'
        f'&mainSkuId={mid}&charset=utf-8&cdn=2'
    )
    payload = requests.get(endpoint, headers=headers).json()
    # the fragment lazy-loads images; rewrite the attribute so they render
    return payload['content'].replace('data-lazyload', 'src')


def get_id(url):
    """Resolve a JD item page to its description HTML.

    Finds the ``desc: '...'`` url in the page, reads skuId/mainSkuId from its
    query string, and delegates to get_content().
    """
    page = requests.get(url, headers=headers).content.decode()
    desc_url = re.findall(r"desc: '(.*?)',", page, re.MULTILINE | re.DOTALL)[0]
    params = dict(parse_qsl(urlsplit(desc_url).query))
    return get_content(params.get('skuId'), params.get('mainSkuId'))


def get_images(url):
    """Scrape the thumbnail strip of a JD item page.

    Returns the full-size image urls serialized as a JSON array string.
    """
    html = requests.get(url, headers=headers).content.decode()
    soup = BeautifulSoup(html, 'lxml')
    prefix = 'https://img13.360buyimg.com/cms/'
    urls = [prefix + node.get('data-url') for node in soup.select('#spec-list > ul > li > img')]
    return json.dumps(urls)


def extract_url(url):
    """Split a cosme-de product url into (path tail, product_no query value).

    e.g. https://www.cosme-de.net/pd/products/ADN0141?product_no=ADN0215
         -> ('ADN0141', 'ADN0215')

    Raises KeyError when the query has no ``product_no``.
    """
    parts = urlsplit(url)
    tail = parts.path.rsplit('/', 1)[-1]
    product_no = dict(parse_qsl(parts.query))['product_no']
    return tail, product_no


from goods.models import Good, GoodCategory, GoodBrand, GoodTags, Source, Product, SPG


def gen_source():
    """Seed the Source table with the three upstream shop sites.

    Replaces three copy-pasted create blocks with one data-driven loop;
    the same three rows are saved in the same order.
    """
    sites = [
        ('cosme-de', 'https://www.cosme-de.net/'),
        ('petpochitto', 'https://www.petpochitto.com/'),
        ('superdelivery', 'https://www.superdelivery.com/'),
    ]
    for name, url in sites:
        source = Source()
        source.name = name
        source.url = url
        source.save()


def read_category1():
    """Import level-1 category names from cos-jd2.xlsx, skipping existing ones.

    Rows that fail are logged by row index to cosme-0527.txt.
    """
    df = pd.read_excel(r'cos-jd2.xlsx')

    for item, row in df.iterrows():
        try:
            name = row['cate1中文']

            # skip categories that were already imported (was: if qs.first(): pass)
            if GoodCategory.objects.filter(category_type=1, name=name).exists():
                continue

            # only instantiate the model once we know the row is new
            category = GoodCategory()
            category.name = name
            category.category_type = 1
            category.parent_category = None
            category.save()

        except Exception as e:
            with open('cosme-0527.txt', 'a+', encoding='utf-8') as f:
                f.write(str(item) + '\n')
            print(e)
            continue


def read_category2():
    """Import level-2 categories from cos-jd2.xlsx, linking each to its level-1 parent.

    Rows that fail (e.g. missing parent category) are logged by row index to
    cosme.txt.
    """
    df = pd.read_excel(r'cos-jd2.xlsx')

    for item, row in df.iterrows():
        try:
            parent_name = row['cate1中文']
            name = row['cate2中文']

            # skip already-imported level-2 categories
            if GoodCategory.objects.filter(category_type=2, name=name).exists():
                continue

            category = GoodCategory()
            category.name = name
            category.category_type = 2
            # restrict the parent lookup to level-1 categories
            category.parent_category = GoodCategory.objects.filter(category_type=1).get(name=parent_name)
            category.save()

        except Exception as e:
            with open('cosme.txt', 'a+', encoding='utf-8') as f:
                f.write(str(item) + '\n')
            print(e)
            continue


def read_category3():
    """Import level-3 categories from cos-jd2.xlsx, linking each to its level-2 parent.

    Rows that fail (e.g. missing parent category) are logged by row index to
    cosme.txt.
    """
    df = pd.read_excel(r'cos-jd2.xlsx')

    for item, row in df.iterrows():
        try:
            parent_name = row['cate2中文']
            name = row['cate3中文']

            # skip already-imported level-3 categories
            if GoodCategory.objects.filter(category_type=3, name=name).exists():
                continue

            category = GoodCategory()
            category.name = name
            category.category_type = 3
            # restrict the parent lookup to level-2 categories
            category.parent_category = GoodCategory.objects.filter(category_type=2).get(name=parent_name)
            category.save()

        except Exception as e:
            with open('cosme.txt', 'a+', encoding='utf-8') as f:
                f.write(str(item) + '\n')
            print(e)
            continue


def brand():
    """Import brands from cos-jd2.xlsx, assigning each a random placeholder logo.

    Rows that fail are logged by row index to cosme_brand.txt.
    """
    import random
    df = pd.read_excel(r'cos-jd2.xlsx')
    # placeholder brand logo images
    bs = [
        'https://kaola-haitao.oss.kaolacdn.com/ii02po4970_300_300.jpg',
        'https://kaola-haitao.oss.kaolacdn.com/ii02shtz66_300_300.jpg',
        'https://kaola-haitao.oss.kaolacdn.com/1c2gohbio65_300_300.jpg',
        'https://kaola-haitao.oss.kaolacdn.com/1cnij2nrr52_300_300.jpg',
        'http://kaola-haitao.oss.kaolacdn.com/ir1s6rzz67_300_300.jpg',
        'https://kaola-haitao.oss.kaolacdn.com/1bk8ndmup45_300_300.jpg',
        'https://kaola-haitao.oss.kaolacdn.com/1el9nv9cj24_300_300.jpg',
        'https://kaola-haitao.oss.kaolacdn.com/ifxc1wa478_300_300.jpg',
        'https://kaola-haitao.oss.kaolacdn.com/1c126l96j31_300_300.jpg',
        'http://kaola-haitao.oss.kaolacdn.com/1btlvtbrr35_300_300.jpg',
        'https://kaola-haitao.oss.kaolacdn.com/ieww1xa027_300_300.jpg',
        'https://kaola-haitao.oss.kaolacdn.com/ig28fghh45_300_300.jpg',
        'https://kaola-haitao.oss.kaolacdn.com/ix2kwjyf95_300_300.jpg'
    ]
    for item, row in df.iterrows():
        try:
            cate = row['cate1中文']
            en = row['en_brand_y']
            zh = row['中文品牌名']

            # skip brands already imported
            if GoodBrand.objects.filter(en_name=en).exists():
                continue

            brand = GoodBrand()
            brand.category = GoodCategory.objects.filter(category_type=1).get(name=cate)
            brand.zh_name = zh
            brand.en_name = en
            # index letter: first character of the English name, '#' when empty.
            # (was `len(...) > 1`, which wrongly mapped one-letter names to '#')
            brand.letter = str(en)[:1].upper() if len(str(en)) >= 1 else '#'
            brand.image = random.choice(bs)
            brand.save()
        except Exception:  # was a bare except: — also trapped KeyboardInterrupt
            with open('cosme_brand.txt', 'a+', encoding='utf-8') as f:
                f.write(str(item) + '\n')
            continue


def get_price(sid):
    """Query JD's price service for a sku.

    Returns the ('op', 'm') price fields of the first result; either may be
    None when absent from the response.
    """
    endpoint = (
        f'https://p.3.cn/prices/mgets?type=1&pdbp=0&skuIds=J_{sid}'
        '&ext=11100000&source=item-pc'
    )
    data = requests.get(endpoint, headers=headers).json()[0]
    return data.get('op'), data.get('m')


def get_info_one(sid):
    """Fetch the full colorSize entry for *sid* from its JD item page.

    Returns a dict like {'skuId': 72488038456, '规格': '白苔深层护发素250ml'}.
    When the page has no matching colorSize block the url is logged to
    cosme_bak.txt and a fallback dict built from the page name is returned.
    """
    url = f'https://npcitem.jd.hk/{sid}.html'
    rep = requests.get(url, headers=headers)
    try:
        pattern = re.compile(r"colorSize: \[(.*?)\], ", re.MULTILINE | re.DOTALL)
        cont = re.findall(pattern, rep.content.decode())[0]
        json_cont = json.loads('[' + cont + ']')
        filter_cont = [x for x in json_cont if x['skuId'] == int(sid)][0]
        return filter_cont
    except Exception:  # was a bare except: — also swallowed SystemExit/KeyboardInterrupt
        with open('cosme_bak.txt', 'a+', encoding='utf-8') as f:
            f.write(str(url) + '\n')
        return {'skuId': sid, "产品": get_name(rep)}


def task(row):
    """Import one product spreadsheet row (thread-pool worker variant of good()).

    Failed rows have their url appended to cosme.txt.
    """
    try:
        # NOTE(review): Product.third_sn is unique; concurrent workers can
        # still race between this exists-check and save() — duplicate-entry
        # 1062 errors land in the except-branch log.
        cate = row['cate3中文']
        en = row['en_brand_y']
        url = row['url']
        path, query = extract_url(url)
        real_url = row['映射网址']

        if Product.objects.filter(third_sn=path):
            return

        # sku id is the numeric tail of the mapped JD url
        sid = urlsplit(real_url).path.strip('/.html')
        rep = requests.get(real_url, headers=headers)
        attr = get_info(rep, sid)
        print(attr)

        title = get_name(rep)
        # Remove the attribute suffix from the title.  The previous
        # title.rstrip(attr) treated attr as a character SET and could strip
        # arbitrary trailing characters of the real title.
        if attr and title.endswith(attr):
            title = title[:-len(attr)]

        good = Product()
        good.source = Source.objects.get(name='cosme-de')
        # restrict the category lookup to level 3
        good.category = GoodCategory.objects.filter(category_type=3).get(name=cate)
        good.brand = GoodBrand.objects.get(en_name=en)
        good.third_sn = path
        good.title = title
        good.sn = get_snowflake_uuid()
        good.detail = get_id(real_url)
        good.images = get_images(real_url)
        good.data_url = real_url
        good.source_url = url
        good.save()

    except Exception as e:
        print(e)
        with open('cosme.txt', 'a+', encoding='utf-8') as f:
            f.write(str(row['url']) + '\n')


def good():
    """Import product (spu-level) rows from cos-jd3.xlsx.

    detail/images are deliberately not fetched here; good_param() fills the
    sku-level data later.  Failed rows have their url appended to cosme004.txt.
    """
    df = pd.read_excel(r'cos-jd3.xlsx')

    for _, row in df.iterrows():
        try:
            # NOTE(review): Product.third_sn is unique; a concurrent run can
            # still race between this exists-check and save() (duplicate 1062).
            cate = row['cate3中文']
            en = row['en_brand_y']
            url = row['url']
            real_url = row['映射网址']
            path, query = extract_url(url)

            if Product.objects.filter(third_sn=path):
                continue

            # sku id is the numeric tail of the mapped JD url
            sid = urlsplit(real_url).path.strip('/.html')
            rep = requests.get(real_url, headers=headers)
            attr = get_info(rep, sid)
            print(attr)

            title = get_name(rep)
            # Remove the attribute suffix from the title.  The previous
            # title.rstrip(attr) treated attr as a character SET and could
            # strip arbitrary trailing characters of the real title.
            if attr and title.endswith(attr):
                title = title[:-len(attr)]

            good = Product()
            good.source = Source.objects.get(name='cosme-de')
            # restrict the category lookup to level 3
            good.category = GoodCategory.objects.filter(category_type=3).get(name=cate)
            good.brand = GoodBrand.objects.get(en_name=en)
            good.third_sn = path
            good.title = title
            good.sn = get_snowflake_uuid()
            good.data_url = real_url
            good.source_url = url
            good.save()

        except Exception as e:
            print(e)
            with open('cosme004.txt', 'a+', encoding='utf-8') as f:
                f.write(str(row['url']) + '\n')


def good_param():
    """Create Good (sku-level) rows for spreadsheet entries not yet imported.

    Fetches price, attributes, description and images per sku.  Failed rows
    have their mapped url appended to cosme0530.txt.
    """
    df = pd.read_excel(r'cos-jd4.xlsx')

    for _, row in df.iterrows():
        try:
            url = row['url']
            real_url = row['映射网址']
            # path identifies the parent Product; query is the sku's source id
            path, query = extract_url(url)

            if Good.objects.filter(source_id=query):
                continue

            # sku id is the numeric tail of the mapped JD url
            sid = urlsplit(real_url).path.strip('/.html')
            sold, market = get_price(sid)
            print(sold)
            # falls back to the page name when the colorSize block is missing
            attr = get_info_one(sid)

            param = Good()
            param.be_product = Product.objects.get(third_sn=path)
            param.sku_id = get_snowflake_uuid()
            param.source_id = query
            param.source_url = url
            param.data_url = real_url
            param.params = json.dumps(attr)
            param.detail = get_id(real_url)
            param.images = get_images(real_url)
            param.market_price = market
            param.sold_price = sold
            param.save()

        except Exception as e:
            print(e)
            with open('cosme0530.txt', 'a+', encoding='utf-8') as f:
                f.write(str(row['映射网址']) + '\n')
            continue


def tag():
    """Import tag names from cos22.xlsx; the tags_x column holds a list literal.

    Failed rows are logged by row index to cosme.txt.
    """
    df = pd.read_excel(r'cos22.xlsx')

    for item, row in df.iterrows():
        try:
            # WARNING: eval() on spreadsheet content executes arbitrary code;
            # ast.literal_eval would be the safe parser for untrusted files.
            tags = eval(row['tags_x'])
            # the inner loop previously reused `item`, clobbering the row
            # index that the except-handler logs below
            for name in tags:
                if GoodTags.objects.filter(jp_name=name).first():
                    continue
                tag = GoodTags()
                tag.en_name = ''
                tag.jp_name = name
                tag.save()
        except Exception:  # was a bare except:
            with open('cosme.txt', 'a+', encoding='utf-8') as f:
                f.write(str(item) + '\n')
            continue


def delete():
    """Strip the '【包税邮】' marker from every Product title and re-save.

    Other markers observed in titles (various direct-mail / pre-sale tags such
    as 【品牌授权】, 【香港直邮】, 【618预售】, 【JD快递】 ...) were handled
    in earlier passes; one regex substitution could cover them all.
    """
    for product in Product.objects.all():
        cleaned = product.title.replace('【包税邮】', '')
        product.title = cleaned
        product.save()


def ex():
    """Rebuild cos-jd4.xlsx from cos-jd3.xlsx minus a hard-coded url blacklist."""
    lt = '''
    https://www.cosme-de.net/pd/products/VE00197?product_no=VE00197
https://www.cosme-de.net/pd/products/CL00972?product_no=CL00834
https://www.cosme-de.net/pd/products/CL00789?product_no=CL00789
https://www.cosme-de.net/pd/products/LP00137?product_no=LP00137
https://www.cosme-de.net/pd/products/LP00403?product_no=LP00403
    '''
    excluded = lt.split()
    print(excluded)
    # keep only the rows whose url is NOT in the blacklist
    df = pd.read_excel('cos-jd3.xlsx')
    df = df[~df['url'].isin(excluded)]
    df.to_excel('cos-jd4.xlsx')


if __name__ == '__main__':
    # Pipeline steps are enabled one at a time by hand; only delete() is
    # currently active.
    # read_category3()
    # brand()
    # gen_source()
    # good()
    # ex()
    delete()
    # good_param()
