
import time
from lxml import etree
from xpinyin import Pinyin
import queue
import os
import requests
from tqdm import tqdm
from util import mongo_manager
from retrying import retry
# Mongo collection handle recording per-image download status ('done'/'fail')
# so repeated runs can skip URLs that were already downloaded successfully.
images_url = mongo_manager('images_url',db = 'cl_data')
from wordslist import activities_in_car

# Global variable definition area; may be modified at runtime.
# -------------------------------------------------------------------
# NOTE(review): these queues are not referenced anywhere else in this file —
# presumably consumed by another module or a removed progress/log thread;
# confirm before deleting.
g_img_progress_queue = queue.Queue(30)
g_down_status_queue = queue.Queue(10)
g_log_queue = queue.Queue(30)
# -------------------------------------------------------------------


# 下载并保存图片
# Download and save images scraped from vcg.com creative-photo search pages.
class Download():
    """Crawl vcg.com creative-photo result pages for a keyword, download the
    images found, and record per-URL status in the ``images_url`` collection.
    """

    def __init__(self) -> None:
        # Browser-like headers and session cookies captured from a real
        # browser session; the site rejects plain programmatic requests.
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Pragma": "no-cache",
            "Sec-Fetch-Dest": "document",
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "same-origin",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
            "sec-ch-ua": "\"Google Chrome\";v=\"129\", \"Not=A?Brand\";v=\"8\", \"Chromium\";v=\"129\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Linux\""
        }
        self.cookies = {
            "_c_WBKFRo": "mrXxUXpvbV0eizhTxBrg65Q0OYLVDlBkhqITuUtE",
            "api_token": "ST-438-9fb5fdfdf45cca011f22435bb48753086",
            "abBoss3": "1.0",
            "name": "15801366532",
            "sensorsdata2015jssdkcross": "%7B%22distinct_id%22%3A%22a59c4e75d4cfe8b0fff93f526f76e2323%22%2C%22first_id%22%3A%2219289ac39696e0-0e914ffee05328-1e462c6f-2073600-19289ac396a6a5%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%7D%2C%22%24device_id%22%3A%2219289ac39696e0-0e914ffee05328-1e462c6f-2073600-19289ac396a6a5%22%7D",
            "clientIp": "112.14.35.253",
            "uuid": "f07da6b6-fa3e-4b50-87c0-995fe8d4b2cb",
            "Hm_lvt_5fd2e010217c332a79f6f3c527df12e9": "1728886618,1728890134,1729128114",
            "Hm_lpvt_5fd2e010217c332a79f6f3c527df12e9": "1729128114",
            "HMACCOUNT": "8A7B5B515FA69801",
            "Hm_up_5fd2e010217c332a79f6f3c527df12e9": "%7B%22uuid%22%3A%7B%22value%22%3A%22f07da6b6-fa3e-4b50-87c0-995fe8d4b2cb%22%2C%22scope%22%3A1%7D%2C%22uid_%22%3A%7B%22value%22%3A%22a59c4e75d4cfe8b0fff93f526f76e2323%22%2C%22scope%22%3A1%7D%2C%22userId%22%3A%7B%22value%22%3A%22a59c4e75d4cfe8b0fff93f526f76e2323%22%2C%22scope%22%3A1%7D%7D",
            "fingerprint": "e96543d5055da432f6047d1a117c6672",
            "acw_tc": "276082a817291429517788069e294c0a50bb5a2e5d03ed2de23c57cf938fdf",
            "acw_sc__v2": "6710a0acb669ae6e5c0c748b2d9102501ce759b8",
            "_fp_": "eyJpcCI6IjExMi4xNC4zNS4yNTMiLCJmcCI6ImU5NjU0M2Q1MDU1ZGE0MzJmNjA0N2QxYTExN2M2NjcyIiwiaHMiOiIkMmEkMDgkZDIzYkZJbWdvbWRHRU9VNTVYM3AyT2NrbUcyR1NoZ3ZRTzZxQk9BSlA4aEpSMmZ0WmtCNTIifQ%3D%3D",
            "ssxmod_itna": "eqRxnDuD9W9DBDl4Yq0PDQcOCDUObQQZQQGnrGixWTyP5DsqLrDSxGKidDqxBWWtqNz=gRmaHF64KbaP33MRiONFEDFfo0InxI4qW+DGoDEx0=DcxDNDGA/h4DsZ=+eDxcq0r+xWekDAnqDFxibDieh929tDlKDRxDCN4i7Di5OQ8KDj0KDrh9mDiIwQRKDd=rrHD0dDDcte4iOL40WnRyeC40TiKLOM2Ixaj4KDm4Qg2YwDCKDjPdke8KDwaxGqz7+d/Amt8oIIaQPKjP7XlRbID2xHjD7tn+r1W4=Pnr4TAi51WseXQGV8cDUDDdxPIGDD",
            "ssxmod_itna2": "eqRxnDuD9W9DBDl4Yq0PDQcOCDUObQQZQQGnrGixWTyPikXL5DlhtxjRuO/Gx2P4GFrkPqCD8r2qCYQiu2GmT/hkRp51AhCadCb=APO+tBUne2FeHW7Ugm6Wks9gFYLj9loRrsNF+38lhUuQDts8k4EiY=TQ0fPs4xE0LtmiYorAnG5+d5meg66FgTTnuUcRmGIW0tsRmoKySh9DptBKqN/3gLqGuoEBq55ArxPF6w5LLwxtMh5=B48D45I6+D4ek+Lb8pP50T1aDTOxp2mf4xCq9ET2CQVn/jky8lEiknaEvLQ22Y0xpk+ILI4g9TAUmRbw6IbpF77I+46T4iHptAZBuf3vIRWOjmY3qAnB4cPgFHVjezFnoNuDl9e1Tgt4gF7YluEibO9Hp0ER05b7+PhoX9AW89NkTkjegKKB4IWKhnDa6YvYKcvAE1U0wWRNoDzAqrDPTAuTWiO32f099YGGS+a+PrID40w5sR3Apa0uQA24Fce8T7eno+En9Fci8b0mw9+qioUIrIYnFSiYvMNreSWI7P2MDrTeMa/nejdfFp0WvL03tcpEfuOBwRFH6YfW5i6m=QgvpAbbWSz+Fy0jUCAFXyKh1FPI0FhnBfWirve+IOa2Qm0OKouxEk+7jHf3vx0qyfuC/RPixW4E5c4hXe/9F6F2wU1kxPXDatS44zxDKkkoGGDafnqCx7bnAP5a4nb72g1km+doGAx8EDZG5le5c8j3t48jKbAK/P6ToY=5aoWHsGKr4zYX7+aq3/Pwi4zKDAdb3quNCx5b0ADNbprr+HLAG=vdzt=uP0K7Y7QmaFrF4D08DG7bu8YdWrMoov0n0D4+iYYkqWBxihjQ+e2qi4ZaF94qKoF4ZcaWeKx=QHa7kcxEx4m44+BoDD=="
        }
        # File extension used for every saved image.
        self.format = 'jpg'

    # Download every image for *keyword* and save it to the local filesystem.
    def save_img_to_local(self, keyword, pages):
        """Fetch image URLs for *keyword* across *pages* result pages,
        download each image, write it to disk, and record per-URL status
        ('done'/'fail') in the ``images_url`` Mongo collection so that
        re-runs can skip already-downloaded images.
        """
        start = time.time()
        tmp_src = self.parse_web(keyword, page=pages)
        # Image save path; adjust as needed for the local machine.
        save_file_path = '/media/chenglei3/77D014CED257D1411/images/vcg/{}/'.format(keyword)
        if not os.path.exists(save_file_path):
            os.makedirs(save_file_path)
        i = 1
        print('图片数:', len(tmp_src))
        for src in tqdm(tmp_src):
            # Result URLs are protocol-relative ("//host/..."); prepend https.
            href = 'https:' + src
            # Skip URLs already downloaded successfully in a previous run.
            if images_url.findOne({'_id': href, 'status': 'done'}):
                i += 1
                continue
            result = {'_id': href, 'platform': 'vcg', 'keyword': keyword}
            try:
                # FIX: added a timeout so one stalled download cannot hang the
                # whole crawl; removed the pointless requests.session() that
                # was created and immediately closed without being used.
                content = requests.get(href, headers=self.headers, timeout=30).content
                filename = '{0}{1}_{2}.{3}'.format(save_file_path, keyword, i, self.format)
                # The context manager closes the file; the explicit f.close()
                # inside the with-block was redundant and has been removed.
                with open(filename, 'wb') as f:
                    f.write(content)
                result['status'] = 'done'
            except Exception as e:
                # Best-effort: record the failure and continue with the rest.
                print(e)
                result['status'] = 'fail'
            # Upsert the status record: insert first, fall back to update when
            # the _id already exists (e.g. retrying a previously failed URL).
            try:
                images_url.insertOne(result)
            except Exception:
                images_url.updateOne({'_id': href}, result)
            i += 1
        end = time.time()
        print('\n --------> {}全部下载完成 <-------- \n耗时: {}s'.format(keyword, round(end - start, 3)))

    # Parse the search result pages and collect all image URLs.
    def parse_web(self, kwyword, page=2):
        """Return the list of protocol-relative image ``data-src`` URLs found
        on result pages 1..*page* for *kwyword*.

        The parameter name ``kwyword`` (sic) is kept for backward
        compatibility with callers that may pass it by keyword.
        """
        # NOTE(review): the original converted the keyword to upper-case
        # pinyin here (via xpinyin) but never used the result; that dead
        # computation has been removed — the URL uses the keyword directly.
        tmp_src = []
        try:
            for i in range(1, page + 1):
                url = "https://www.vcg.com/creative-photo/{0}/?page={1}".format(kwyword, str(i))
                response = requests.get(url, headers=self.headers, cookies=self.cookies, timeout=10)
                content = response.text
                time.sleep(0.2)  # be polite between page fetches
                tree = etree.HTML(content)
                src_list = tree.xpath('//div[@id="root"]//div[@class="gallery_inner"]//figure/a/img/@data-src')
                tmp_src += src_list
        except Exception as e:
            # FIX: the original bare `except: pass` silently swallowed every
            # error (network, parse, ...). Still best-effort — return whatever
            # was collected so far — but report the failure.
            print(e)
        return tmp_src

if __name__ == '__main__':
    download = Download()
    successlist = []

    # NOTE(review): the original de-duplicated ``activities_in_car`` here but
    # never used it afterwards; that dead computation has been removed.
    people_keywords = [
        "中国婴儿", "车内中国婴儿",
        "中国儿童男", "车内中国儿童男",
        "中国儿童女", "车内中国儿童女",
        "中国成年人男", "车内中国成年人男",
        "中国成年人女", "车内中国成年人女",
        "中国老年人男", "车内中国老年人男",
        "中国老年人女", "车内中国老年人女"
    ]
    # The list is not mutated during iteration, so the defensive [:] copy the
    # original made was unnecessary and has been dropped.
    for word in people_keywords:
        print('begin:', word)
        pages = 15  # number of result pages to crawl per keyword
        download.save_img_to_local(word, pages)
        successlist.append(word)
    print('successlist:', successlist)
    print("Done")



