#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from urllib import response
import requests
from loguru import logger
import sys
from easymysql import EasyMysql
import re
import time
from pyquery import PyQuery as pq
import copy
import json


def newdata():
    """Return a fresh record template for one JD product listing.

    Every field starts out as an empty string; scrapers fill in whatever
    they manage to extract.  A brand-new dict is built on each call so
    individual records never share state.
    """
    fields = (
        "店铺", "颜色", "版本", "购买方式", "单品链接", "价格", "优惠信息",
        "累计评价", "标题", "入网型号", "上市日期", "机型", "机身尺寸",
        "机身重量", "机身颜色", "屏幕尺寸", "屏幕分辨率", "屏幕特色",
        "屏幕刷新率", "电池容量", "充电功率", "无线充电", "系统", "5G网络",
        "4G网络", "SIM卡类型", "3G/2G网络", "双卡机类型", "SIM卡数量",
        "数据接口", "耳机接口", "充电接口", "生物识别", "后摄2-超广角像素",
        "后摄主像素", "拍照特色", "前摄主像素", "后摄3-tele像素", "包装清单",
    )
    # dict.fromkeys preserves the tuple's insertion order (Python 3.7+).
    return dict.fromkeys(fields, "")


# Browser-impersonating request headers shared by every HTTP call in this
# module.  Callers that need per-request state take a deep copy and layer
# referer/cookie on top (see getprice / getpicCommentCount / first).
headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36",
           "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
           "accept-language": "zh-CN,zh;q=0.9",
           'sec-ch-ua': '"Google Chrome";v="105", "Not)A;Brand";v="8", "Chromium";v="105"'}


def match1(text, *patterns):
    """Return group 1 of the first pattern that matches *text*, else None.

    Non-string *text* (ints, response objects, ...) is retried as
    ``str(text)`` when ``re.search`` raises TypeError.
    """
    def _search(pattern):
        try:
            return re.search(pattern, text)
        except TypeError:
            return re.search(pattern, str(text))

    for pattern in patterns:
        hit = _search(pattern)
        if hit is not None:
            return hit.group(1)
    return None


def matchall(text, patterns):
    """Run ``re.findall`` for every pattern in *patterns* and return the
    concatenated list of all hits, in pattern order.

    Non-string *text* is retried as ``str(text)`` on TypeError, mirroring
    :func:`match1`.
    """
    hits = []
    for pattern in patterns:
        try:
            found = re.findall(pattern, text)
        except TypeError:
            found = re.findall(pattern, str(text))
        hits.extend(found)
    return hits


def getprice(url, cookie):
    """Query JD's price API for the item referenced by *url*.

    Parameters
    ----------
    url : str
        Product page URL; the first run of digits in it is used as sku id.
    cookie : str
        Logged-in JD cookie string, forwarded so the API answers for this
        account/region.

    Returns
    -------
    str | None
        The price as a string (e.g. ``"5999.00"``) or None when the
        response carries no ``"p"`` field.
    """
    fk_headers = copy.deepcopy(headers)
    fk_headers["referer"] = url
    fk_headers["cookie"] = cookie

    # Raw strings keep \d a literal regex escape (no SyntaxWarning on 3.12+).
    skuid = match1(url, r"(\d+)")
    price_url = (
        "https://fts.jd.com/prices/mgets?callback=jQuery1956413&type=1"
        "&area=12_978_980_36485&pdtk=&pduid=166492865177337109076"
        "&pdpin=jd_43724bc80c8c6&pin=jd_43724bc80c8c6&pdbp=0"
        f"&skuIds={skuid}&ext=11100000&source=pc-item"
    )

    # Explicit timeout so a dead endpoint cannot hang the whole crawl.
    response = requests.get(price_url, headers=fk_headers, timeout=30)
    return match1(response.text, r'"p":"([\d\.]+)"')


def getpicCommentCount(url, cookie):
    """Query JD's comment-summary API for the item referenced by *url*.

    Parameters
    ----------
    url : str
        Product page URL; the first run of digits in it is used as sku id.
    cookie : str
        Logged-in JD cookie string.

    Returns
    -------
    str | None
        The human-readable total review count (``DefaultGoodCountStr``,
        e.g. ``"10万+"``), or None when the field is absent.
    """
    fk_headers = copy.deepcopy(headers)
    fk_headers["referer"] = url
    fk_headers["cookie"] = cookie

    # Raw strings keep regex escapes literal; re itself resolves \uXXXX.
    skuid = match1(url, r"(\d+)")
    summary_url = (
        "https://club.jd.com/comment/productCommentSummaries.action"
        f"?referenceIds={skuid}&categoryIds=9987,653,655"
        f"&callback=jQuery2680114&_={int(time.time()*1000)}"
    )

    # Explicit timeout so a dead endpoint cannot hang the whole crawl.
    response = requests.get(summary_url, headers=fk_headers, timeout=30)
    return match1(response.text, r'"DefaultGoodCountStr":"([\u4e00-\u9fa5\w\+]+)"')


def first(mysqldb, s: int, url: str, cookie: str):
    """Walk the JD category listing pages and queue every product URL.

    Each sku found on a listing page is inserted into the `pageurls`
    table as a first-level ("一级") URL with an empty `text` column, to
    be filled in later by :func:`getpage`.

    Parameters
    ----------
    mysqldb : EasyMysql
        Open database handle.
    s : int
        Running product counter and the ``s`` offset JD expects in the
        listing URL.  To resume an interrupted run, pass the last value
        that was logged instead of restarting from 1.
    url : str
        Base category listing URL; ``&page=..&s=..`` is appended.
    cookie : str
        Logged-in JD cookie string.
    """
    fk_headers = copy.deepcopy(headers)
    fk_headers["referer"] = "https://list.jd.com/"
    fk_headers["x-requested-with"] = "XMLHttpRequest"
    fk_headers["Cookie"] = cookie
    page = (s // 30) + 1  # JD lists 30 items per page

    while page < 300:  # the site officially serves only 199 pages
        newurl = f"{url}&page={page}&s={s}"
        logger.info(f"===>>> get {newurl} ")
        try:
            # Timeout so one stalled request cannot hang the crawl.
            response = requests.get(newurl, headers=fk_headers, timeout=30)
            lis = matchall(response.text, [r'li data-sku="(\d+)"'])

            if len(lis) == 0:  # no skus: blocked or past the last page — stop
                logger.debug(response.text)
                break

            for sku in lis:
                suburl = f"https://item.jd.com/{sku}.html"
                logger.info(f"suburl => {suburl}")
                mysqldb.Mysql_Insert_Ex(
                    "pageurls", ("url", "urltype", "text"), (suburl, "一级", ""))
                s = s + 1

            logger.info(
                f"--------------------------- {s} ------------------------------")

            page = page + 1
            time.sleep(5)  # be polite; reduces anti-bot throttling
        except KeyboardInterrupt:
            logger.error("user ctrl+c exit programe")
            break
        except Exception as err:
            logger.error(err)
            break


def getpage(mysqldb: EasyMysql, cookie: str):
    """Scrape every queued product page whose `text` column is still empty.

    For each pending URL: parse the detail page into a :func:`newdata`
    record, store it back into `pageurls` as JSON, then queue any related
    sku links found on the page as second-level ("二级") URLs.

    Parameters
    ----------
    mysqldb : EasyMysql
        Open database handle.
    cookie : str
        Logged-in JD cookie string, forwarded to the price/comment APIs.
    """
    # NOTE(review): the bare ("") argument is a plain string, not a
    # one-element tuple ("",) — apparently accepted by EasyMysql; confirm
    # against its API before "fixing".
    err, msg, data = mysqldb.Mysql_Select(
        "select `id`,`url` from `pageurls` where `text`=%s", (""))
    # NOTE(review): err looks like a truthy-on-success flag here (falsy
    # means failure) — verify against the EasyMysql implementation.
    if not err:
        logger.error(msg)
        return
    index = 0
    maxcount = len(data)
    for id, url in data:
        try:
            # module-level `headers` is only read, so no `global` needed;
            # timeout keeps a stalled request from hanging the crawl.
            response = requests.get(url, headers=headers, timeout=30)

            # =============== extract the detail fields =====================
            doc = pq(response.text)
            jddata = newdata()
            jddata["店铺"] = doc("#popbox > div > div.mt > h3 > a").text()
            jddata["颜色"] = doc(
                "#choose-attr-1 > div.dd > div.item.selected").text()
            jddata["版本"] = doc(
                "#choose-attr-2 > div.dd > div.item.selected").text()
            jddata["购买方式"] = doc(
                "#choose-attr-3 > div.dd > div.item.selected").text()
            jddata["单品链接"] = f"{url}"
            jddata["价格"] = getprice(url, cookie)
            jddata["优惠信息"] = doc(
                "#summary-quan > div.dd > dl > dd > a > span > span").text()
            jddata["累计评价"] = getpicCommentCount(url, cookie)
            jddata["标题"] = doc("div.itemInfo-wrap > div.sku-name").text()

            # Spec table: each <dl> renders as "label\n...\nvalue"; keep
            # only labels that exist in the record template.
            textdata = doc(
                "#detail > div.tab-con > div:nth-child(2) div.Ptable dl.clearfix")
            for item in textdata.items():
                itemtext = item.text().split("\n")
                logger.debug(itemtext)
                if itemtext[0] in jddata:
                    jddata[itemtext[0]] = itemtext[-1]

            jddata["包装清单"] = doc(
                "#detail > div.tab-con > div:nth-child(2) > div.package-list > p").text()
            for key, value in jddata.items():
                logger.info(f"{key} => {value}")
            mysqldb.Mysql_Update(
                "update `pageurls` set `text` = %s where `id`=%s",
                (json.dumps(jddata, ensure_ascii=False).encode('utf8'), id))

            # =============== queue related product pages ===================
            lis = matchall(response.text, [r'data-sku="(\d+)"'])
            if len(lis) == 0:  # nothing found: likely blocked, skip this url
                logger.debug(response.text)
                continue

            for li in lis:
                suburl = f"https://item.jd.com/{li}.html"
                logger.info(f"suburl => {suburl}")
                mysqldb.Mysql_Insert_Ex(
                    "pageurls", ("url", "urltype", "text"), (suburl, "二级", ""))
            index = index + 1

            logger.info(
                f"------------------ {index} / {maxcount} ---------------------")

        except KeyboardInterrupt:
            logger.error("user ctrl+c exit programe")
            break
        except Exception as err:
            logger.error(err)
            time.sleep(10)  # back off before trying the next url


if __name__ == '__main__':
    # appconfig supplies the DB credentials, the category listing URL and
    # the logged-in JD cookie; imported lazily so importing this module as
    # a library does not require it.
    import appconfig
    parser = argparse.ArgumentParser(description='app manage')
    parser_cmds = parser.add_mutually_exclusive_group()
    parser_cmds.add_argument(
        '--first', action='store_true', default=False, help="从分类起始页开始获得所有商品")
    parser_cmds.add_argument(
        '--getpage', action='store_true', default=False, help="抓取数据库里所有空的详细页的内容")
    parser.add_argument("-D", "--debug", action='store_true',
                        default=False, help="debug info ")

    args = parser.parse_args()
    try:
        if args.debug is False:
            # Without --debug, replace loguru's default sink with a terser
            # INFO-level format (suppresses DEBUG output such as raw HTML).
            logger.remove()
            fmt = "<green>{time:HH:mm:ss}</green> | <level>{level.name:<8}</level>| <level>{message}</level>"
            logger.add(
                sys.stderr, format=fmt, colorize=True, level="INFO")

        if args.first:

            Easydb = EasyMysql(appconfig.DB_Host, appconfig.DB_Port, appconfig.DB_User,
                               appconfig.DB_Passwd, appconfig.DB_DataBase, appconfig.DB_Charset)
            # s is the ordinal of the first product to fetch.  If a previous
            # run was interrupted there is no need to restart from scratch —
            # change s to the sequence number you want to resume from.
            # For anything unclear, contact QQ:403453619
            first(Easydb, s=1, url=appconfig.url, cookie=appconfig.cookie)
            # exit() raises SystemExit, caught and logged by the handler below.
            exit()
        if args.getpage:
            Easydb = EasyMysql(appconfig.DB_Host, appconfig.DB_Port, appconfig.DB_User,
                               appconfig.DB_Passwd, appconfig.DB_DataBase, appconfig.DB_Charset)
            getpage(Easydb, cookie=appconfig.cookie)
            exit()

        # No sub-command given: show usage.
        parser.print_help()
    except KeyboardInterrupt:
        logger.error("user ctrl+c exit programe")

    except SystemExit:
        logger.warning("system exit programe")