# -*- coding: utf-8 -*-

"""
通过将图片转成百度图片搜索的格式,
发起请求,将图片检索结果封装为json返回
{
    "guessWord":"图片检索结果关键字",
    "details":{
        "similarity":"相似度",
        "name":"名字",
        "birth":"生日",
        "synop":"概要"
    }
}
"""
import requests
import os
import json
import re
from bs4 import BeautifulSoup

# Browser-like headers so Baidu serves the regular desktop pages.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
}

# Base URL of Baidu image search.
source = "http://image.baidu.com"
# baike_source = "https://baike.baidu.com/search/word?word="

# Fetch the "vsid" token: it is embedded in the image-search landing page
# and is appended later as the "vs" query parameter of the results request.
# NOTE(review): this runs a network request at import time and raises
# IndexError if the page no longer contains `window.vsid` — confirm that
# failing at import is acceptable for consumers of this module.
vs_url = source + "/?fr=shitu"
vs_page = requests.get(vs_url, headers=headers).text
vs_id = re.findall('window.vsid = "(.*?)"', vs_page)[0]

# Upload endpoint for "search by image" (responds with JSON).
url = "/pcdutu/a_upload?fr=html5&target=pcSearchImage&needJson=true"


class BaiduGuessImg(object):
    """Reverse-image search against Baidu (image.baidu.com).

    Uploads a local image, lets Baidu guess a keyword for it, then scrapes
    the accompanying Baike (encyclopedia) snippet. Results are collected
    into ``self.result``::

        {
            "guessWord": "<guessed keyword>",
            "details": {"similarity": ..., "name": ..., "birth": ..., "synop": ...},
            "from": "baidu",
        }
    """

    def __init__(self, name, fpath):
        # File name sent in the multipart upload.
        self.imgname = name
        # Local filesystem path of the image.
        self.imgpath = fpath
        # Aggregated result; "details" stays "" until Baike data is found.
        self.result = {"guessWord": "", "details": "", "from": "baidu"}

    def tryGuess(self):
        """Run the search.

        Returns:
            The ``self.result`` dict (possibly partially filled on network
            or parse failure), or ``None`` when name/path were not given.
        """
        if self.imgpath is None or self.imgname is None:
            print("img name or path is none !")
            return None
        self.__tryRequest()
        return self.result

    def __tryRequest(self):
        """Upload the image, fetch the result page and fill self.result."""
        print("try request baidu")

        try:
            # Context manager closes the file handle even on errors
            # (the original leaked an open file on every call).
            with open(self.imgpath, "rb") as img:
                files = {"file": (self.imgname, img, "image/jpeg"),
                         "pos": (None, "upload"),
                         "uptype": (None, "upload_pc"),
                         "fm": (None, "home")}
                r = requests.post(source + url, headers=headers,
                                  files=files, timeout=10)

            tmp_json = json.loads(r.text)
            queryImageUrl = tmp_json["url"]
            querySign = tmp_json["querySign"]
            # BUGFIX: the original omitted the "&" before "fm=index", which
            # glued "fm=index" onto the querySign parameter value.
            url2 = (source + "/pcdutu?queryImageUrl=" + queryImageUrl +
                    "&querySign=" + querySign +
                    "&fm=index&uptype=upload_pc&result=result_camera&vs=" + vs_id)
            r2 = requests.get(url2, headers=headers).text
            guessWord = re.findall("'guessWord': '(.*?)'", r2)[0]
            print("guessWord = ", guessWord)
            if guessWord:
                self.result["guessWord"] = guessWord
                # Enrich the guess with Baike (encyclopedia) details.
                self.__parseResult(r2)

        except Exception as e:
            # Best effort: swallow network/parse errors and log them so the
            # caller still gets whatever was collected so far.
            print("tryRequest Exception e", e)

    def __parseResult(self, r2):
        """Scrape Baike details out of the result-page HTML into self.result."""
        try:
            # In Python 3 the default encoding is unicode, so a
            # from_encoding="utf-8" argument would be ignored.
            soup = BeautifulSoup(r2, "html.parser")
            baike_result = soup.find("div", id="guessBaike")
            if not baike_result:
                print("没有默认百科结果 !!!")
                # No inline Baike box on the page: query baike.baidu.com
                # directly with the guessed keyword and take the page's
                # <meta name="description"> as the synopsis.
                bk = requests.get("https://baike.baidu.com/item/" +
                                  self.result["guessWord"],
                                  headers=headers, timeout=5)
                bk.encoding = "utf-8"
                soup = BeautifulSoup(bk.text, "html.parser")
                des = soup.find("meta", attrs={"name": "description"})
                print(des["content"])
                if des["content"]:
                    self.result["details"] = {"similarity": "", "name": "",
                                              "birth": "", "synop": des["content"]}
            else:
                similarity = baike_result.find(
                    "div", class_="guess-newbaike-left-similarity").get_text()
                name = baike_result.find(
                    "a", class_="guess-newbaike-name").get_text()
                if name and self.result["guessWord"] != name:
                    # Prepend the Baike entry name when it differs from the guess.
                    self.result["guessWord"] = name + " " + self.result["guessWord"]

                birth = baike_result.find(
                    "span", class_="guess-newbaike-text-birth-span").get_text()
                # The synopsis is the first <p> that follows the birth span.
                synop = baike_result.find(
                    "span", class_="guess-newbaike-text-birth-span").find_next("p").get_text()

                self.result["details"] = {"similarity": similarity, "name": name,
                                          "birth": birth, "synop": synop}

        except Exception as e:
            print("parse result exception e", e)
