from urllib.parse import urlencode

from Spider.Base import Base


class BaiduImageSpider(Base):
    """Spider for Baidu image search (the image.baidu.com ``acjson`` JSON endpoint).

    Fetches thumbnail results page by page, de-duplicates them by thumbnail
    URL and returns them sorted by source date, newest first.
    """

    def __init__(self, logger, proxy_radio=-1):
        """
        :param logger: logger used by ``Base`` for info/error reporting
        :param proxy_radio: proxy ratio forwarded to ``Base``; -1 presumably
            disables proxying — confirm against ``Spider.Base``
        """
        super(BaiduImageSpider, self).__init__(logger, proxy_radio)
        self.name = "BaiduImage"
        self.ch_name = "百度图片"  # Chinese display name ("Baidu Images")
        self.platform_id = 1

    def __searchImageArrayWithPage(self, keyword, page=1):
        """Fetch a single result page for *keyword*.

        :param keyword: search term (may contain non-ASCII characters)
        :param page: 1-based page number
        :return: ``(state, imageArray)`` — ``state`` is True only when at
            least one usable item was parsed; ``imageArray`` is a list of
            dicts with keys ``url_source``, ``url_source_org``,
            ``url_source_date`` and ``name``.
        """
        state = False
        imageArray = []

        rn = 30         # page size: number of results per request
        pn = page * rn  # offset: index just past the end of this page

        # Query parameters mirror the request a browser makes on
        # image.baidu.com; most empty values are still required.
        urlParams = [
            ("tn", "resultjson_com"),
            ("logid", "12175815090078773918"),
            ("ipn", "rj"),
            ("ct", "201326592"),
            ("is", ""),
            ("fp", "result"),
            ("fr", ""),
            ("word", keyword),
            ("queryWord", keyword),
            ("cl", "2"),
            ("lm", "-1"),
            ("ie", "utf-8"),
            ("oe", "utf-8"),
            ("adpicid", ""),
            ("st", "-1"),
            ("z", ""),
            ("ic", "0"),
            ("hd", ""),
            ("latest", ""),
            ("copyright", ""),
            ("s", ""),
            ("se", ""),
            ("tab", ""),
            ("width", ""),
            ("height", ""),
            ("face", "0"),
            ("istype", "2"),
            ("qc", ""),
            ("nc", "1"),
            ("expermode", ""),
            ("nojc", ""),
            ("isAsync", ""),
            ("pn", str(pn)),
            ("rn", str(rn)),
            ("gsm", "1e"),
            ("1640315984319", ""),  # cache-buster timestamp the site sends
        ]

        # Fix: percent-encode the query string. The original concatenated raw
        # values (including the Chinese keyword) directly into the URL, which
        # produces an invalid URL for non-ASCII or reserved characters.
        url = "https://image.baidu.com/search/acjson?" + urlencode(urlParams)

        headers = {
            "Content-Type": "text/html; charset=UTF-8"
        }
        res = self.get(self.getCurrentFuncName(), url, headers)
        if res:
            content_len = len(res.text)
            # Very short responses are error/anti-bot pages, not result JSON.
            if content_len > 1000:
                try:
                    data = res.json()

                    # Guard: "data" may be absent/None on malformed responses;
                    # Baidu also appends a trailing empty dict, which the
                    # field checks below filter out.
                    img_data = data.get("data") or []

                    for img_item in img_data:
                        url_source = img_item.get("thumbURL")
                        url_source_org = img_item.get("fromURLHost")
                        url_source_date = img_item.get("bdImgnewsDate")
                        name = img_item.get("fromPageTitleEnc")

                        # Keep only items with all required source fields.
                        if url_source and url_source_org and url_source_date:
                            imageArray.append({
                                "url_source": url_source,
                                "url_source_org": url_source_org,
                                "url_source_date": url_source_date,
                                "name": name,
                            })
                    if len(imageArray) > 0:
                        state = True
                    else:
                        self.logger.error("keyword=%s,page=%d,imageArray=0|%s" % (keyword, page, url))
                except Exception as e:
                    self.logger.error("keyword=%s,page=%d,%s|%s" % (keyword, page, str(e), url))
            else:
                self.logger.error("keyword=%s,page=%d,content_len=%d|%s" % (keyword, page, content_len, url))

        return state, imageArray

    def searchImageArray(self, keyword, pageCount):
        """Collect up to *pageCount* pages of results for *keyword*.

        Stops early at the first page that yields no usable items.
        Duplicate thumbnails (same ``url_source``) are dropped.

        :param keyword: search term
        :param pageCount: maximum number of pages to fetch
        :return: list of result dicts sorted by ``url_source_date``,
            newest first, e.g.::

            [
                {'url_source': 'https://img0.baidu.com/it/u=3796985805,1597574221&fm=26&fmt=auto', 'url_source_org': 'www.go007.com', 'url_source_date': '2010-07-15 18:48', 'name': '唐山市利明医院'}
                {'url_source': 'https://img0.baidu.com/it/u=1890182711,3386363198&fm=26&fmt=auto', 'url_source_org': 'tsdsfs.com', 'url_source_date': '2010-02-18 00:00', 'name': '唐山市政府'}
            ]
        """
        seen = set()
        effective_count = 0
        repeat_count = 0
        imageArray = []

        for page in range(1, pageCount + 1):
            _state, _imageArray = self.__searchImageArrayWithPage(keyword=keyword, page=page)
            if not _state:
                # A failed/empty page ends the crawl for this keyword.
                break
            for d in _imageArray:
                if d.get("url_source") not in seen:
                    effective_count += 1
                    imageArray.append(d)
                    seen.add(d.get("url_source"))
                else:
                    repeat_count += 1

        # Date strings are "YYYY-MM-DD HH:MM", so lexicographic order equals
        # chronological order; sort newest first.
        imageArray.sort(key=lambda x: x.get("url_source_date"), reverse=True)
        self.logger.info("keyword=%s,pageCount=%d,effective_count=%d,repeat_count=%d" % (keyword, pageCount, effective_count, repeat_count))
        return imageArray



if __name__ == '__main__':
    # Demo run: fetch a few result pages and dump them to stdout.
    import logging

    spider = BaiduImageSpider(logger=logging.getLogger(), proxy_radio=-1)
    results = spider.searchImageArray(keyword="唐山市唐山体育中心", pageCount=5)

    print("共计获取：", len(results))
    for item in results:
        print(item)