import threading
import time
import urllib
import urllib.request

import requests
from lxml import etree
from xpinyin import Pinyin
import queue


# Module-level queue definitions; these may be reconfigured.
# The Download worker writes into these, and Child_Process relays
# them to the queues supplied by the parent process.
# -------------------------------------------------------------------
g_img_progress_queue = queue.Queue(30)
g_down_status_queue = queue.Queue(10)
g_log_queue = queue.Queue(30)
# -------------------------------------------------------------------


# Downloads and saves images scraped from vcg.com.
class Download():
    """Scrape image URLs for a keyword from vcg.com and save them locally.

    Progress percentages, completion status and log messages are reported
    through the module-level queues (g_img_progress_queue,
    g_down_status_queue, g_log_queue) so a supervising process can relay
    them to a UI.
    """

    def __init__(self, key_word, pages, format) -> None:
        """Store the search parameters; paths/queues are injected later.

        key_word: search keyword used both in the page URL and file names.
        pages:    number of result pages to scrape (string or int).
        format:   image format selector (name kept for API compatibility,
                  even though it shadows the builtin).
        """
        self.key_word = key_word
        self.pages = pages
        self.format = format
        # These are assigned by Child_Process.process_work before downloading.
        self.save_file_path = None
        self.img_progress_queue = None
        self.child_process_status_queue = None
        self.child_process_log_queue = None
        # Kept for compatibility; pinyin conversion is not used to build the URL.
        self.pinyin = Pinyin()
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)\
             Chrome/106.0.0.0 Safari/537.36'
        }
        # Session cookies captured from a browser; required to get past the
        # site's anti-scraping checks. Likely to expire — refresh as needed.
        self.cookies = {
            "sajssdk_2015_cross_new_user": "1",
            "sensorsdata2015jssdkcross": "%7B%22distinct_id%22%3A%2219289ac39696e0-0e914ffee05328-1e462c6f-2073600-19289ac396a6a5%22%2C%22first_id%22%3A%22%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%7D%2C%22%24device_id%22%3A%2219289ac39696e0-0e914ffee05328-1e462c6f-2073600-19289ac396a6a5%22%7D",
            "_c_WBKFRo": "mrXxUXpvbV0eizhTxBrg65Q0OYLVDlBkhqITuUtE",
            "acw_tc": "1a0c651a17288901254385874e00b59a70fa9badc7391070a609158726f413",
            "acw_sc__v3": "670cc513081d9bb7785b5b5a59fc7709a28022aa",
            "clientIp": "112.14.35.253",
            "uuid": "f4677136-4940-480f-8082-f01d1e30438a",
            "Hm_lvt_5fd2e010217c332a79f6f3c527df12e9": "1728886618,1728890134",
            "Hm_lpvt_5fd2e010217c332a79f6f3c527df12e9": "1728890134",
            "HMACCOUNT": "8A7B5B515FA69801",
            "Hm_up_5fd2e010217c332a79f6f3c527df12e9": "%7B%22uuid%22%3A%7B%22value%22%3A%22f4677136-4940-480f-8082-f01d1e30438a%22%2C%22scope%22%3A1%7D%2C%22uid_%22%3A%7B%22value%22%3A%22%22%2C%22scope%22%3A1%7D%7D",
            "fingerprint": "dfa1987ac0f5e3df5d8e9d679732c8f5",
            "_fp_": "eyJpcCI6IjExMi4xNC4zNS4yNTMiLCJmcCI6ImRmYTE5ODdhYzBmNWUzZGY1ZDhlOWQ2Nzk3MzJjOGY1IiwiaHMiOiIkMmEkMDgkdmpDU0g1SnRNay92Y1pNWTlyMWNVT3BXVWhsVDNtSlFXT09zblRFNU9zMEtMTC9LZGZaVGUifQ%3D%3D",
            "ssxmod_itna": "eqAOq0xGxhOG8DgDl4Yq0Pc6t5Gkfii+WK8fWYvYiBKDBkBO4iNDnD8x7YDvI0OE0++3NWlB2dw=SexaQK3+x0IEbZeobj30irGRGoD74i8DCqi1D0qDYfrQP0HToQx2eDx2q0r+xWekDG4ND7PDoxDrK+5kSD0kDY5D0PoDhxD1mgAOD04kDeP8a4DOiSYkDjWPAADDXDDWS7YDpc3DTOLsaBoDNogjIc5keC2qODi3Xjvwt40OD09jjOAODQ9P4F7+d97mW4oI8892Fi0daWvi4pmooexx3nD1LYw5eg2xuQiezi0oTgxNAXo0ecZfxW+teIaoD===",
            "ssxmod_itna2": "eqAOq0xGxhOG8DgDl4Yq0Pc6t5Gkfii+WK8fWYvYifD8u6OD0IKD/QNDFE+iryWmxal7wGtD6QBHqP9/El7wkivEL2QkYsmWSq6Zb9i5hX3wAwg=pVWLZT8Rqn/A48p=iDmcSxNGFxHiivzYa2DSQTF32PbwrDx8grbYtpFwOBaPgYIq2uqLr07PiKxrGw5FP2reWWxSouat6n7Aow=59Y8uBSh2cirNWgau1WPHPg2Y/im6rIT3PHPuG0T2gupWIAO5VwA8hKTv+maYSyCtuLcwOoTHrPNDpRkD2TG81ma=eOnSXKHT47HSY0RQoB7CxbG2qYb3LE4aDTxmsnB7tbiVq3=0FyAiDqOBAT0f4BR+a8TY3TzA832+=r54okK8OdfDMRzTM=MMTT4xqY4sge5o3HodP7AMle9DWo2HMu8jg3mWwDEtB7A9RAoAdSEQB0voOK4Sdq23Y2WeqC34Y/g3rYeGRKz7bmb4b3tP0QWT508CIcTu0oLfEa3bdOAhGQKP+ogxFgxPh2Bg1BBtni59pYq6hc7Ut2qwxxon0ofALT2yasce=j+mvEz3nrS2e79GZmQh3z2mCohcuoTT9brwdqOpBqYw2tXg72RLtbzbpz04zTwRvZTGQTnKdnkMjCWjMKKquyA6V0fN5onZFvaxT3i7zUrto1ppqk3UDDwgwX6whADpO6UvpG6iGRgxnwO7aQQuoxt0OTx3XsBD+GOkKimiA42hKj4x+dWztP4CYlxzY42Qs+xx+zB5ih8OterW0CzDUGYW5VdKL3U7e0hud4iwpAvWijYPpGsnZitPDDFqDeuolhCfD0xkBK4xqDHexCB4Vf=Wx+eNYev4bf5mbb3Y+bdOtYoGfdqfXA4eD==="
        }

    # Save all scraped images to the local save_file_path.
    def save_img_to_local(self):
        """Download every scraped image URL, reporting progress and logs.

        A failure on one image is logged and skipped instead of silently
        killing the (daemon) worker thread, as the old code did.
        """
        tmp_src = self.parse_web()
        total = len(tmp_src)  # hoisted out of the loop; also guards the division below
        start = time.time()
        for i, src in enumerate(tmp_src, 1):
            name = '{0}{1}_{2}'.format(self.save_file_path, self.key_word, src.split('/')[-1])
            try:
                urllib.request.urlretrieve('https:' + src, name)
            except Exception as e:
                # Best effort: report the failure and move on to the next image.
                self.trans_log_to_ui('download failed {0}: {1}'.format(src, e))
                continue
            self.trans_log_to_ui('<-------- {0}'.format(name))
            self.update_img_progress(int(round(i / total, 3) * 100))
            time.sleep(0.2)  # throttle requests to avoid hammering the server
        end = time.time()
        self.trans_log_to_ui('\n --------> 全部下载完成 <-------- \n耗时: {0}s'.format(round(end - start, 3)))
        # 'down_sucess' (sic) is the exact token the consumer matches; keep as-is.
        g_down_status_queue.put('down_sucess')

    # Parse the result pages and collect all image URLs.
    def parse_web(self):
        """Fetch each result page and return the list of image src URLs.

        Returns whatever was collected before the first error; errors are
        surfaced via the log queue instead of being silently swallowed.
        """
        tmp_src = []
        try:
            for page in range(1, int(self.pages) + 1):
                url = "https://www.vcg.com/creative-photo/{0}/?page={1}".format(self.key_word, str(page))
                self.trans_log_to_ui("开始第{0}页请求 -------->".format(str(page)))

                self.trans_log_to_ui("获取第{0}页网页源码 -------->".format(str(page)))

                response = requests.get(url, headers=self.headers, cookies=self.cookies, timeout=10)
                content = response.text
                time.sleep(0.2)  # polite delay between page fetches
                tree = etree.HTML(content)
                src_list = tree.xpath('//div[@id="root"]//div[@class="gallery_inner"]//figure/a/img/@data-src')
                tmp_src += src_list
                self.trans_log_to_ui(' --------> 抓取图片总数量：{}张 <--------'.format(len(tmp_src)))
        except Exception as e:
            # Keep partial results, but tell the UI what went wrong
            # (the old bare `except: pass` hid every failure).
            self.trans_log_to_ui('parse_web error: {0}'.format(e))
        return tmp_src

    # Worker thread asks the supervising process to show a message in the UI.
    def trans_log_to_ui(self, msg):
        """Queue a log message for the UI relay loop."""
        g_log_queue.put(msg)

    # Worker thread reports download progress (0-100) for the UI progress bar.
    def update_img_progress(self, val: int):
        """Queue a progress percentage for the UI relay loop."""
        g_img_progress_queue.put(val)


# Supervises the image download: runs it in a thread and relays messages
# from the module-level queues to the queues supplied by the parent process.
class Child_Process():
    def __init__(self, key_word, pages, format) -> None:
        # UI signal slots; expected to be wired by the owner (left None here).
        self.print_log_signal = None
        self.img_progress_signal = None
        self.down = Download(key_word, pages, format)

    # Child-process entry point: parse the pages, download, and relay
    # progress/status/log traffic back to the parent.
    def process_work(self, queue1, queue2, queue3, save_file_path):
        """Start the download thread and forward queue traffic indefinitely.

        queue1: parent's progress queue, queue2: parent's status queue,
        queue3: parent's log queue. save_file_path is the directory prefix
        for saved images. This method never returns; the parent is expected
        to terminate the process.
        """
        self.down.save_file_path = save_file_path
        self.down.img_progress_queue = queue1
        self.down.child_process_status_queue = queue2
        self.down.child_process_log_queue = queue3

        self.img_progress_queue = queue1
        self.child_process_status_queue = queue2
        self.child_process_log_queue = queue3

        _thread_save_img = threading.Thread(target=self.down.save_img_to_local, daemon=True)
        _thread_save_img.start()

        # Relay loop. get_nowait() replaces the old `if not q.empty(): q.get()`
        # pattern, which is a check-then-act race: the queue could be drained
        # between the empty() check and the blocking get(), hanging the loop.
        relays = (
            (g_img_progress_queue, self.img_progress_queue),
            (g_down_status_queue, self.child_process_status_queue),
            (g_log_queue, self.child_process_log_queue),
        )
        while True:
            for src_q, dst_q in relays:
                try:
                    dst_q.put(src_q.get_nowait())
                except queue.Empty:
                    pass  # nothing pending on this queue right now
            time.sleep(0.2)  # poll interval; keeps CPU usage low


if __name__ =='__main__':
    # This module is driven by a parent process (via Child_Process.process_work);
    # it intentionally does nothing when executed directly.
    pass
