import threading
import time
import urllib
import urllib.request
from lxml import etree
from xpinyin import Pinyin
import queue
import os
from wordslist import animalslist,jililists,plantlist
import requests
from tqdm import tqdm

# Module-level globals — may be mutated/consumed by other modules.
# -------------------------------------------------------------------
# Bounded queues, presumably for cross-thread progress/status/log
# reporting (threading is imported but no consumer is visible in this
# file — TODO confirm against the rest of the project before removing).
g_img_progress_queue = queue.Queue(30)
g_down_status_queue = queue.Queue(10)
g_log_queue = queue.Queue(30)
# -------------------------------------------------------------------


# 下载并保存图片
# Download and save creative-photo images scraped from vcg.com.
class Download():
    def __init__(self) -> None:
        """Configure request headers, output image format and save root."""
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)\
             Chrome/106.0.0.0 Safari/537.36'
        }
        self.format = 'jpg'
        # Root output directory; generalized from the previously hard-coded
        # path in save_img_to_local so it can be overridden per instance.
        self.save_root = '/home/chenglei3/work/2023.01/viewchina/jili3/'

    # Download each scraped image URL and write it under <save_root>/<keyword>/.
    def save_img_to_local(self, kwyword, pages):
        """Download every image found for *kwyword* across *pages* result pages.

        Files are named ``<kwyword>_<i>.<format>``; the counter ``i`` only
        advances on a successful save (matching the original behavior).
        Individual download failures are reported and skipped rather than
        silently swallowed.
        """
        start = time.time()
        tmp_src = self.parse_web(kwyword, page=pages)
        save_file_path = '{0}{1}/'.format(self.save_root, kwyword)
        # exist_ok avoids the check-then-create race of the old exists() test.
        os.makedirs(save_file_path, exist_ok=True)
        i = 1
        print('图片数:', len(tmp_src))
        for src in tqdm(tmp_src):
            href = 'https:' + src
            try:
                resp = requests.get(href, headers=self.headers, timeout=10)
                # Fail fast on HTTP errors instead of saving an error page as a .jpg.
                resp.raise_for_status()
                filename = '{0}{1}_{2}.{3}'.format(save_file_path, kwyword, i, self.format)
                with open(filename, 'wb') as f:
                    f.write(resp.content)
                i += 1
            except (requests.RequestException, OSError) as exc:
                # Best-effort per image: report the reason, keep downloading.
                print('download failed:', href, exc)
        end = time.time()
        print('\n --------> {}全部下载完成 <-------- \n耗时: {}s'.format(kwyword, round(end - start, 3)))

    # Parse the gallery result pages and collect every image URL.
    def parse_web(self, kwyword, page=2):
        """Return a list of protocol-relative image URLs for *kwyword*.

        A purely numeric keyword is used directly as the site category id;
        otherwise the keyword is transliterated to a pinyin slug. A failed
        page fetch is reported and skipped instead of aborting all remaining
        pages (the old whole-loop ``try`` returned a silently truncated list).
        """
        tmp_src = []
        try:
            keyword_trans_pinyin = int(kwyword)
        except ValueError:
            translator = Pinyin()
            keyword_trans_pinyin = translator.get_pinyin(kwyword, '').strip()
        for i in range(1, page + 1):
            url = "https://www.vcg.com/creative-photo/{0}/?page={1}".format(keyword_trans_pinyin, str(i))
            try:
                response = requests.get(url, headers=self.headers, timeout=10)
                response.raise_for_status()
            except requests.RequestException as exc:
                print('page fetch failed:', url, exc)
                continue
            time.sleep(0.2)  # throttle: be polite to the server
            tree = etree.HTML(response.text)
            src_list = tree.xpath('//div[@id="root"]//div[@class="gallery_inner"]//figure/a/img/@data-src')
            tmp_src += src_list
        return tmp_src



if __name__ == '__main__':
    # Script entry point: crawl a fixed batch of keywords and report progress.
    downloader = Download()
    finished = []

    # Historical keyword batches kept for reference; only the last one runs.
    words = plantlist
    testwords = ['中国结', '汽车', '北京大学']
    word_20230215 = ['灯笼', '兔子', '龙']
    word_20230215_1 = ['中国结', '福', '花窗', '平安扣', '折扇']
    word_20230215_2 = ['元宝', '如意', '玉佩', '爆竹',]
    word_20230215_3 = ['佛像', '观音', '葫芦', '貔貅', '关公', '56592', '招财猫']  # 56592: golden toad category id

    pages_per_keyword = 15  # hoisted: identical for every keyword
    for keyword in word_20230215_3:
        print('begin:', keyword)
        downloader.save_img_to_local(keyword, pages_per_keyword)
        finished.append(keyword)
    print('successlist:', finished)
    print("Done")



