# -*- coding: utf-8 -*-
# @Time    : 2020/5/21 10:12
# @Author  :
# @File    : down_routers.py
# @Software: PyCharm
import json
import re
import traceback
from urllib.parse import quote

import requests
from fastapi import APIRouter, Form
from parsel import Selector

from app.core.return_info import ReturnInfo, SUCCESS, FAILED

# Shared router instance; the endpoints below are registered on it and
# mounted by the application elsewhere.
router = APIRouter()


def checkExist(obj):
    """Return True when *obj* is not None and contains at least one element.

    Used throughout this module to test parsel SelectorList results before
    indexing into them.
    """
    # `is not None` instead of `!= None`, and the condition itself is the result.
    return obj is not None and len(obj) > 0


@router.post("/login")
def login(*, proxy: str = Form(None), username: str = Form("13609452731"), password: str = Form("a123456")):
    """Log in to u1.gsdata.cn and return the session cookies as JSON.

    A successful login is signalled by a 302 redirect from the platform.

    :param proxy: optional "host:port" proxy address used for both schemes.
    :param username: platform account name.  NOTE(security): real credentials
        are hard-coded as defaults here — they should be moved to config.
    :param password: platform password (same security note as above).
    :return: ReturnInfo dict; on success ``data`` holds the cookie dict
        serialized with ``json.dumps``.
    """
    rtn = ReturnInfo()
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Host": "u1.gsdata.cn",
        "Origin": "https://u1.gsdata.cn",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    proxies = None
    if proxy:
        # BUG FIX: the original set the https entry to the literal string
        # "http:// + proxy" instead of concatenating the proxy address.
        proxies = {
            'http': "http://" + proxy,
            'https': "http://" + proxy,
        }
    try:
        r = requests.post(
            url="https://u1.gsdata.cn/member/login?url=http://www.gsdata.cn/",
            headers=headers,
            data={
                "username": username,
                "password": password
            },
            proxies=proxies,
            # The platform answers a successful login with a 302; we must not
            # follow it or the status code check below would see the target page.
            allow_redirects=False
        )
    except requests.RequestException:
        print(traceback.format_exc())
        rtn.status = FAILED
        rtn.msg = "请求错误，没有返回Response"
        return rtn.todict()  # consistent with the success path
    if r.status_code != 302:
        rtn.status = FAILED
        rtn.msg = "登录平台返回状态码错误"
        return rtn.todict()
    rtn.status = SUCCESS
    rtn.msg = "登录成功"
    rtn.data = json.dumps(r.cookies.get_dict())
    return rtn.todict()


@router.post("/search")
def search(*, proxy: str = Form(None), cookies: str = Form(None), kword: str = Form(None)):
    """Run a WeChat-account keyword search on gsdata.cn and return the raw HTML.

    :param proxy: optional proxy address, used verbatim for http and https.
    :param cookies: JSON string of session cookies (as produced by /login);
        when absent, anonymous cookies are bootstrapped from the homepage.
    :param kword: search keyword (may contain CJK text).
    :return: ReturnInfo dict; on success ``data`` holds ``kword`` and ``html``.
    """
    rtn = ReturnInfo()

    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Host": "www.gsdata.cn",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    proxies = None
    if proxy:
        proxies = {
            'http': proxy,
            'https': proxy
        }
    if cookies:
        cookies = json.loads(cookies)
    else:
        # No session supplied: hit the homepage once to obtain anonymous cookies.
        try:
            r = requests.get(
                url="http://www.gsdata.cn/",
                headers=headers,
                proxies=proxies
            )
        except requests.RequestException:
            print(traceback.format_exc())
            rtn.status = FAILED
            rtn.msg = "请求错误，没有返回Response"
            return rtn.todict()
        cookies = r.cookies
    try:
        # BUG FIX: kword was concatenated unencoded; CJK text, spaces or '&'
        # in the keyword produced a malformed query string.
        r = requests.get(
            url="http://www.gsdata.cn/query/wx?q=" + quote(kword),
            headers=headers,
            cookies=cookies,
            proxies=proxies
        )
    except requests.RequestException:
        print(traceback.format_exc())
        rtn.status = FAILED
        rtn.msg = "请求错误，没有返回Response"
        return rtn.todict()
    if r.status_code != 200:
        rtn.msg_code = r.status_code
        rtn.status = FAILED
        rtn.msg = "登录平台返回状态码错误"
        return rtn.todict()
    content = r.content
    if content.startswith(b'\xef\xbb\xbf'):  # strip UTF-8 BOM if present
        content = content[3:]
    htmlText = content.decode('utf8').strip()
    # The results page always contains this marker phrase; its absence means
    # we got an error/captcha/login page instead of search results.
    if htmlText.find("为您找到相关结果") < 0:
        rtn.msg_code = r.status_code
        rtn.status = FAILED
        rtn.msg = "查询网页特征值错误"
        rtn.data = htmlText
        return rtn.todict()
    rtn.status = SUCCESS
    rtn.msg = "查询成功"
    rtn.data = {
        "kword": kword,
        "html": htmlText
    }
    return rtn.todict()


@router.post("/parse_search")
def parse_search(*, kword: str = Form(None), html: str = Form(None)):
    """Extract the matching account's wxname from a search-results page.

    With exactly one hit, the first result's href is taken directly; with
    several hits, the first result is accepted only when its nickname starts
    with the keyword (truncated at the first punctuation separator).

    :param kword: the keyword the page was searched with.
    :param html: raw HTML of the gsdata search-results page.
    :return: ReturnInfo dict with ``kword`` and ``wxname`` (empty on failure).
    """
    rtn = ReturnInfo()
    page = Selector(text=html)
    count_node = page.xpath("//span[@id='qcount']/text()")
    prefix = kword
    wxname = None
    anchor_path = "./div[@class='img-word']/div[@class='word']/h1/a"
    if checkExist(count_node):
        hit_count = int(count_node.extract()[0].replace(",", "").strip())
        if hit_count > 0:
            result_items = page.xpath("//li[contains(@class,'list_query')]")
            if checkExist(result_items):
                if hit_count == 1:
                    hrefs = result_items[0].xpath(anchor_path + "/@href")
                    if checkExist(hrefs):
                        wxname = hrefs.extract()[0].split("wxname=")[1]
                elif hit_count > 1:
                    # Truncate the keyword at its first separator character so
                    # the prefix comparison below is more forgiving.
                    for ch in kword:
                        if ch in "／，+(）.．：（·)、－":
                            prefix = kword.split(ch)[0]
                            break
                    anchor = result_items[0].xpath(anchor_path)
                    if checkExist(anchor):
                        nickname = anchor.xpath("string(.)").extract()[0]
                        if nickname.startswith(prefix):
                            wxname = anchor.xpath("./@href").extract()[0].split("wxname=")[1]
    payload = {
        "kword": kword,
        "wxname": wxname if wxname else ""
    }
    if wxname:
        rtn.status = SUCCESS
        rtn.msg = "[{}]解析成功".format(kword)
    else:
        rtn.status = FAILED
        rtn.msg = "[{}]解析结果为空".format(kword)
    rtn.data = payload
    return rtn.todict()


@router.post("/detail")
def detail(*, proxy: str = Form(None), cookies: str = Form(None), wxname: str = Form(None)):
    """Fetch the gsdata detail page for one WeChat account and return its HTML.

    :param proxy: optional proxy address, used verbatim for http and https.
    :param cookies: JSON string of session cookies (as produced by /login);
        when absent the request is made without cookies instead of crashing.
    :param wxname: the account identifier extracted by /parse_search.
    :return: ReturnInfo dict; on success ``data`` holds ``wxname`` and ``html``.
    """
    rtn = ReturnInfo()
    # ROBUSTNESS: the original called json.loads(cookies) unconditionally and
    # raised TypeError when no cookies were posted.
    cookies = json.loads(cookies) if cookies else None
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Host": "www.gsdata.cn",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
    }
    proxies = None
    if proxy:
        proxies = {
            'http': proxy,
            'https': proxy
        }
    try:
        r = requests.get(
            url="http://www.gsdata.cn/rank/wxdetail?wxname=" + wxname,
            headers=headers,
            cookies=cookies,
            proxies=proxies
        )
    except requests.RequestException:
        print(traceback.format_exc())
        # BUG FIX: the original read r.status_code in this handler, but r is
        # unbound when the request itself raised -> UnboundLocalError.
        rtn.status = FAILED
        rtn.msg = "请求错误，没有返回Response"
        return rtn.todict()
    if r.status_code != 200:
        rtn.msg_code = r.status_code
        rtn.status = FAILED
        rtn.msg = "登录平台返回状态码错误"
        return rtn.todict()
    content = r.content
    if content.startswith(b'\xef\xbb\xbf'):  # strip UTF-8 BOM if present
        content = content[3:]
    htmlText = content.decode('utf8').strip()
    # A real detail page always carries the wxDetail-name element; anything
    # else (captcha, error page) is treated as a failure.
    if htmlText.find("wxDetail-name") < 0:
        rtn.msg_code = r.status_code
        rtn.status = FAILED
        rtn.msg = "查询网页特征值错误"
        return rtn.todict()
    rtn.status = SUCCESS
    rtn.msg = "查询成功"
    rtn.data = {
        "wxname": wxname,
        "html": htmlText
    }
    return rtn.todict()


@router.post("/parse_detail")
def parse_detail(*, kword: str = Form(None), wxname: str = Form(None), html: str = Form(None)):
    """Parse an account detail page into a flat statistics dict.

    Extracts the account title, the WeChat id, and for each stat card the
    current value plus its day-over-day delta.  All counters stay as the raw
    page strings (or None when the card is absent).

    :param kword: original search keyword, echoed back in the result.
    :param wxname: account identifier, echoed back in the result.
    :param html: raw HTML of the gsdata wxdetail page.
    :return: ReturnInfo dict whose ``data`` is the statistics dict.
    """
    rtn = ReturnInfo()
    # Result skeleton; keys/order match the contract of the original endpoint.
    stats = {
        "kword": kword,
        "wxname": wxname,
        "wxid": "",
        "wxtitle": "",
        "totalViewCnt": None,
        "totalViewAddCnt": None,
        "avgViewCnt": None,
        "avgViewAddCnt": None,
        "viewingCnt": None,
        "viewingAddCnt": None,
        "toutiaoViewCnt": None,
        "toutiaoViewAddCnt": None,
        "wciCnt": None,
        "wciAddcnt": None,
        "rankCnt": None,
        "rankAddCnt": None,
        "zanCnt": None,
        "zanAddCnt": None,
        "todayTotalPubCnt": None,
    }
    sel = Selector(text=html)

    # --- header block: account title and WeChat id -------------------------
    div_intro = sel.xpath("//div[contains(@class,'wxDetail-info')]")
    if checkExist(div_intro):
        label_title = div_intro.xpath("./div[contains(@class,'wxDetail-name')]/label[contains(@class,'fs22')]/text()")
        if checkExist(label_title):
            stats["wxtitle"] = label_title.extract()[0].strip()
        div_info = div_intro.xpath("./div[contains(@class,'info-li')]")
        if checkExist(div_info):
            info_rows = div_info.xpath("./div[contains(@class,'fl')]/p")
            if checkExist(info_rows):
                for row in info_rows:
                    tag = row.xpath("./label/text()").extract()[0].strip()
                    if "微信号" in tag:  # "WeChat id" row
                        stats["wxid"] = row.xpath("./text()").extract()[0].strip()

    # --- stat cards --------------------------------------------------------
    # Ordered dispatch: (label substring, value key, delta key).  Order mirrors
    # the original elif chain; first match wins.
    card_fields = (
        ("总阅读量", "totalViewCnt", "totalViewAddCnt"),       # total reads
        ("头条阅读量", "toutiaoViewCnt", "toutiaoViewAddCnt"),  # headline reads
        ("排名", "rankCnt", "rankAddCnt"),                      # rank
        ("在看数", "viewingCnt", "viewingAddCnt"),              # "wow" count
        ("点赞数", "zanCnt", "zanAddCnt"),                      # likes
        ("平均", "avgViewCnt", "avgViewAddCnt"),                # average reads
        ("WCI", "wciCnt", "wciAddcnt"),                         # WCI index
    )
    for card in sel.xpath("//div[contains(@class,'wxDetail-data')]/div"):
        label = card.xpath("./div[contains(@class,'wxData-txt')]/span")
        label = label.xpath("string(.)").extract()[0].strip().replace(" ", "")
        cont = card.xpath("./div[contains(@class,'wxData-cont')]")
        value = None
        value_node = cont.xpath("./p[2]/text()")
        if checkExist(value_node):
            value = value_node.extract()[0].strip()
        delta = None
        delta_node = cont.xpath("./p[3]")
        if checkExist(delta_node):
            delta = delta_node.xpath("string(.)").extract()[0].strip()
        for needle, value_key, delta_key in card_fields:
            if needle in label:
                stats[value_key] = value
                stats[delta_key] = delta
                if needle == "平均":
                    # The "average" card's first paragraph also carries today's
                    # publication count; pull the first number out of it.
                    pub_node = cont.xpath("./p[1]/text()")
                    if checkExist(pub_node):
                        nums = re.findall(r"\d+\.?\d*", pub_node.extract()[0].strip())
                        if checkExist(nums):
                            stats["todayTotalPubCnt"] = nums[0]
                break

    rtn.status = SUCCESS
    rtn.msg = "解析成功"
    rtn.data = stats
    return rtn.todict()
