from threading import Thread

import requests
import re
import os
import random
from datetime import datetime
# from threading import Thread
from multiprocessing.pool import ThreadPool
import json
import datetime

import read_excel

class DownloadPictureSouGou:
    """Crawl and download images from Sogou image search (pic.sogou.com).

    Typical use:
        * ``run_1(keyword, n)`` — fetch up to ``n`` images for one keyword.
        * ``run(keywords)``     — fetch a small random number of images for
          each keyword in an iterable (URLs are tagged with the keyword).

    Instance state: ``self.keyword`` / ``self.picture_number`` are set by the
    spider methods and read by ``request_data`` / ``download_img``.
    """

    def __init__(self):
        # Browser-like User-Agent so the Sogou API serves normal JSON
        # instead of an anti-bot response.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.6776.400 QQBrowser/10.3.2577.400'
        }

    def request_data(self, start, lenght=100):
        """Fetch one page of search results and return the parsed JSON dict.

        start:  offset into the result list, passed straight to the API.
        lenght: page size requested from the API (parameter name kept —
                misspelled — for backward compatibility with keyword callers).
        Requires ``self.keyword`` to have been set by the caller.
        """
        url = 'https://pic.sogou.com/napi/pc/searchList?mode=1&start={0}&xml_len={1}&query={2}&rawQuery={2}&st=255'.format(start, lenght, self.keyword)
        # NOTE(review): no timeout is set, so a stalled connection blocks the
        # calling thread indefinitely — consider requests.get(..., timeout=...).
        resp = requests.get(url, headers=self.headers).text
        return json.loads(resp)

    def num_spider(self, keyword, picture_number):
        """Collect ``picture_number`` image URLs for ``keyword``.

        Pages through the API 100 results at a time; the final request asks
        only for the remainder (e.g. 560 -> 5 pages of 100, then one of 60).
        BUGFIX: when ``picture_number`` is an exact multiple of 100 the
        original code issued a final request for 0 images; that request is
        now skipped. Stops early when the API returns no ``data`` payload.
        """
        self.keyword = keyword
        self.picture_number = picture_number
        img_urls = []
        if picture_number > 100:
            full_pages, remainder = divmod(picture_number, 100)
            for i in range(full_pages + 1):
                if i == full_pages:
                    if remainder == 0:
                        break  # nothing left to fetch
                    items = self.request_data(i * 100 + 1, remainder)
                else:
                    items = self.request_data(i * 100 + 1)
                try:
                    if not items.get('data'):
                        break
                    img_urls.extend(self.ruler_url(items))
                except Exception:
                    print('无照片数量')
                    break
        else:
            items = self.request_data(1, picture_number)
            img_urls.extend(self.ruler_url(items))
        return img_urls

    def add_name(self, li, keyword):
        """Tag every URL in ``li`` with '♥<keyword>' so the keyword can be
        recovered from the URL string later in the pipeline."""
        return ['{}♥{}'.format(url, keyword) for url in li]

    def num_spider_two(self, keyword, picture_number=10):
        """Fetch a random 5..picture_number image URLs for ``keyword``.

        A single API request is made; the returned URLs are truncated to the
        random count and tagged with the keyword via ``add_name``.
        """
        self.keyword = keyword
        self.picture_number = random.randint(5, picture_number)
        items = self.request_data(0, self.picture_number)
        return self.add_name(self.ruler_url(items)[:self.picture_number], keyword)

    def ruler_url(self, data):
        """Extract the original-size picture URLs from one API response dict
        (``data -> items -> oriPicUrl``)."""
        return [item.get('oriPicUrl') for item in data.get('data').get('items')]

    def repead(self, filename):
        """Return a unique timestamped '<keyword>-<µs>.jpg' name if
        ``filename`` already exists in the hard-coded picture directory;
        implicitly returns None otherwise.
        NOTE(review): not called by the current download path.
        """
        list_name = os.listdir('D:\\project\\project_3\\picture2')
        if filename in list_name:
            st = round(datetime.datetime.now().timestamp() * 10 ** 6)
            return '{}-{}.jpg'.format(self.keyword, st)

    def download_img(self, url):
        """Download one image into D:\\project\\project_6\\sogo_picture\\<keyword>.

        Accepts both plain URLs (from ``num_spider``) and '♥keyword'-tagged
        URLs (from ``num_spider_two``). Failures are reported and swallowed
        so one bad URL does not abort the whole run.
        """
        try:
            # BUGFIX: strip the '♥keyword' tag before requesting — the
            # original fetched the tagged string as-is, so every download
            # produced by run()/num_spider_two failed. Plain URLs contain
            # no '♥' and pass through unchanged.
            img_url = url.split('♥')[0]
            file_dir = 'D:\\project\\project_6\\sogo_picture\\{}'.format(self.keyword)
            file_name = os.path.join(file_dir, img_url.split('/')[-1])
            # Replaces the exists-check + try/except FileExistsError dance.
            os.makedirs(file_dir, exist_ok=True)
            img = requests.get(url=img_url, headers=self.headers).content
            with open(file_name, 'wb') as f:
                f.write(img)
        except Exception as e:
            print('{}-该图片下载失败！失败原因--{}'.format(url, e))

    def thread_pool_get_img(self, urls):
        """Download ``urls`` concurrently on a 10-worker thread pool,
        skipping anything without a jpg/jpeg/png extension."""
        pool = ThreadPool(10)
        for url in urls:
            if url.split('.')[-1] in ['jpg', 'jpeg', 'png']:
                pool.apply_async(self.download_img, args=(url,))
        pool.close()
        pool.join()

    def thread_get_img(self, urls):
        """Spawn one thread per URL (extension-filtered; tolerant of the
        '♥keyword' tag). NOTE(review): thread count is unbounded — prefer
        thread_pool_get_img for large URL lists."""
        for url in urls:
            if url.split('.')[-1].split('♥')[0] in ['jpg', 'jpeg', 'png']:
                Thread(target=self.download_img, args=(url,)).start()

    def run_1(self, keyword, num_page=10):
        """Fetch up to ``num_page`` image URLs for ``keyword`` and download
        them all on separate threads."""
        urls = self.num_spider(keyword, num_page)
        print('已获取{}个相关照片链接...'.format(len(urls)))
        self.thread_get_img(urls)

    def run(self, content):
        """For each keyword in ``content``, collect a random handful of
        tagged image URLs, then download everything; per-keyword failures
        are printed and skipped."""
        url_list = []
        print('正在获取照片连接中...')
        for keyword in content:
            try:
                url_list.extend(self.num_spider_two(keyword))
            except Exception as e:
                print(e)
                continue
        print(url_list)
        print('url_list长度是：')
        print(len(url_list))
        self.thread_get_img(url_list)

if __name__ == '__main__':
    downloader = DownloadPictureSouGou()
    # Alternative entry point: feed keywords from a spreadsheet instead of
    # a single hard-coded query:
    #   keywords = read_excel.read_xlrd(read_excel.dir_name)
    #   downloader.run(keywords)
    downloader.run_1('红军大刀', 2000)


