import os
import re
from hashlib import md5
from multiprocessing import Pool

import pymongo
import requests
from fake_useragent import UserAgent

# Random User-Agent source; sampled once at class-definition time below.
# NOTE(review): `use_cache_server` was removed in newer fake_useragent
# releases — confirm the pinned version still accepts this keyword.
ua = UserAgent(use_cache_server=True)


class Campus_BelleSpider(object):
    """Crawler for xiaohuar.com listing pages.

    Parses each listing page for image titles and URLs, normalizes
    relative URLs against the site root, and downloads every image to
    local disk.  A MongoDB client/collection is prepared as class state
    for optional persistence (insertion is currently disabled).
    """

    client = pymongo.MongoClient('localhost')
    db = client['xiaohua']
    # One random User-Agent picked when the class is defined and reused
    # for every request issued by this spider.
    headers = {'User-Agent': '{}'.format(ua.random)}

    def get_total_page(self):
        """Return the total number of listing pages as an int.

        Fetches the first listing page and parses the bold entry of the
        pagination widget, which carries the page count.
        """
        response = requests.get('http://www.xiaohuar.com/list-1-0.html',
                                headers=self.headers).text
        page_num = re.findall(
            re.compile(r'<div class="page_num">.*?<a.*?<b>(.*?)</b>', re.S),
            response)
        return int(page_num[0])

    def get_detail_html(self, page):
        """Scrape one listing page and download every image it shows.

        :param page: zero-based listing page index.
        """
        print("正在查询第{}页的内容:".format(page))
        # Fix: the original request omitted the User-Agent headers that
        # every other request in this class sends; send them consistently.
        response = requests.get(
            'http://www.xiaohuar.com/list-1-{}.html'.format(page),
            headers=self.headers).text
        # Each match is a (title, url) pair taken from the two quoted
        # attributes following the width marker of a thumbnail <img>.
        info = re.findall(
            re.compile(r'<img width="210".*?"(.*?)".*?"(.*?)"', re.S),
            response)
        for title, url in info:
            # Relative URLs carry no scheme; prefix the site root.
            if "http" not in url:
                url = "http://www.xiaohuar.com{}".format(url)
            # TODO(review): persist {title: url} via self.db['xiaohua']
            # if MongoDB storage is wanted again.
            print(title)
            print(url)
            self.download_image(url)

    def download_image(self, url):
        """Download one image and save it as E:/imgs/<md5(url)>.jpg.

        :param url: absolute image URL.
        """
        response = requests.get(url, headers=self.headers)
        if response.status_code == 200:
            # Images are binary payloads: use response.content (bytes),
            # not response.text (decoded string).
            content = response.content
            # md5() needs bytes, so encode the URL first; hexdigest()
            # yields a stable, filesystem-safe file name.
            img_name = md5(url.encode('utf-8')).hexdigest()
            # Fix: make sure the target directory exists before writing.
            os.makedirs('E:/imgs', exist_ok=True)
            # 'wb' because the data is binary; `with` guarantees the file
            # handle is closed even if the write raises.
            with open('E:/imgs/{}.jpg'.format(img_name), 'wb') as f:
                f.write(content)
        else:
            print('图片请求失败：{}'.format(url))

    def start_spider(self, page):
        """Pool entry point: crawl a single listing page."""
        self.get_detail_html(page)


if __name__ == '__main__':
    cb = Campus_BelleSpider()
    # Fix: discover the real page count from the site instead of the
    # hard-coded 1092, so the spider stays correct as pages are added.
    total_pages = cb.get_total_page()
    # Fix: a pool of size 1 defeats multiprocessing; use a few workers.
    pool = Pool(4)
    # range() is already an iterable of page indices 0..total_pages-1;
    # no need to materialize it into a list first.
    pool.map(cb.start_spider, range(total_pages))
    pool.close()
    pool.join()












