# -*- coding: utf-8 -*-

"""
通过将图片转成百度图片搜索的格式,
发起请求,将图片检索结果封装为json返回
{
    "guessWord":"图片检索结果关键字",
    "details":{
         
    }
}
"""
import requests
import json
import re
from bs4 import BeautifulSoup
import urllib.parse as urlparse

# Browser-like User-Agent so Sogou serves the normal web responses.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
}

# Sogou image-search endpoints.
source = "http://pic.sogou.com"
upload_url = source + "/ris_upload"   # multipart image upload; redirects to a result URL
query_url = source + "/ris?"          # ajax result queries, parameterized below

# Encyclopedia (baike) lookup for a guessed keyword.
baike_url = "http://pic.sogou.com/ris/baike_result.jsp?query="


class SogouGuessImg(object):
    """Reverse-image search via Sogou.

    Uploads an image, extracts the query token from the redirect URL,
    polls up to three candidate result URLs for a guessed keyword
    ("entity"), then tries to fetch a Sogou baike (encyclopedia) entry
    for that keyword.

    Result shape (also returned by tryGuess):
        {"guessWord": <keyword or "">,
         "details":   <baike JSON string or "">,
         "from":      "sogou"}
    """

    def __init__(self, name, fpath):
        # name: file name reported in the multipart upload.
        # fpath: local path of the image to upload.
        self.imgpath = fpath
        self.imgname = name
        # Three candidate result URLs, filled in by __tryRequest.
        self.imgqurl_dict = {1: "", 2: "", 3: ""}
        self.result = {"guessWord": "", "details": "", "from": "sogou"}

    def tryGuess(self):
        """Run the search and return the result dict.

        Returns:
            The result dict (guessWord/details may stay empty on failure),
            or None when the image name or path is missing.
        """
        # `is None` per PEP 8 (the original used `== None`).
        if self.imgpath is None or self.imgname is None:
            print("img name or path is none!")
            return None
        self.__tryRequest()
        return self.result

    def __tryRequest(self):
        """Upload the image and fill self.result in place.

        Network or parsing failures are logged and leave self.result
        partially filled rather than raising.
        """
        print("try request sogou")

        try:
            # Context manager closes the file even when the upload raises
            # (the original leaked the open file object).
            with open(self.imgpath, "rb") as img:
                files = {"file": (self.imgname, img, "image/jpeg"),
                         "flag": (None, "1")}
                r = requests.post(upload_url, headers=headers,
                                  files=files, timeout=10)

            # The upload redirects to a result page whose query string
            # carries the token we need for the ajax queries.
            qurl = r.url
            print("qurl = ", qurl)
            qs = urlparse.urlparse(qurl).query
            # parse_qs maps each key to a list; keep only the first value.
            qargs = {k: v[0] for k, v in urlparse.parse_qs(qs).items()}
            print("args = ", qargs)

            if "query" not in qargs or "flag" not in qargs:
                print("url err: no query arg !")
                return

            # Sogou's exact strategy is unclear. Observed behaviour: the
            # result page fires three GETs, presumably exact, simple and
            # fuzzy matching. Strategy here: try the exact-match URL first
            # and only fall through to the looser ones while no entity has
            # been found yet.
            self.imgqurl_dict[1] = query_url + "query=" + \
                qargs["query"] + "&flag=1&reqType=ajax&st=12&reqForm=result"

            self.imgqurl_dict[2] = query_url + "query=" + \
                qargs["query"] + "&dm=0&reqType=ajax&tn=0&reqForm=result"

            self.imgqurl_dict[3] = query_url + "query=" + \
                qargs["query"] + "&flag=1&reqType=ajax&reqForm=result"

            entity = ""
            for i in (1, 2, 3):
                if entity:
                    break
                try:
                    ir = requests.get(
                        self.imgqurl_dict[i], headers=headers, timeout=5).text
                    tj = json.loads(ir)
                    entity = tj["entity"]
                    print("entity ============== ", entity)
                except Exception as e:
                    print("e = ", e)

            if not entity:
                return
            self.result["guessWord"] = entity

            # The entity sometimes contains "|" or spaces (multiple
            # keywords), which makes the baike lookup miss; try each part
            # in turn. Raw string fixes the invalid "\|" escape warning.
            ets = re.split(r"\|| ", entity)
            print("ets = ", ets)
            sogou_baike = ""
            i = 0
            while (not sogou_baike) and i < len(ets):
                try:
                    sogou_baike = requests.get(
                        baike_url + ets[i], headers=headers, timeout=5).text
                except Exception as e:
                    print("e", e)

                i = i + 1

            # Guard: if every baike request failed, the original fed "" to
            # json.loads and crashed into the outer except.
            if not sogou_baike:
                return

            # Check the baike result is usable; .get avoids a KeyError on
            # unexpected payloads.
            bkj = json.loads(sogou_baike)
            if bkj.get("link") and bkj.get("content"):
                self.result["details"] = sogou_baike.replace("\r\n", "").replace(" ", "")

        except Exception as e:
            print(" Exception e", e)
