import os
import requests
from bs4 import BeautifulSoup
import os
from urllib import request
import time
import random
import multiprocessing

# Browser-like User-Agent so the site serves pages to scripted requests.
# NOTE(review): the UA string mixes inconsistent Firefox/platform tokens —
# presumably copy-pasted; confirm the site actually requires it.
headers={'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; Win64; x64; rv:60.0) Gecko/20100101 Firefox/2.0.0.11'}
HOMEPAGE = 'https://fzfz9.com'
# Maps a human-readable category slug to the site's URL path segment
# (e.g. 'yazhousetu' -> https://fzfz9.com/PIC02/list_1.html).
# NOTE(review): name has a typo (CATEGORT -> CATEGORY); kept as-is because
# the rest of this file references it under this name.
CATEGORT_TO_INDEX = {
    'toupaizipai': 'PIC01',
    'yazhousetu': 'PIC02',
    'oumeisetu': 'PIC03',
    'dongmantupian': 'PIC04',
    'meituisiwa': 'PIC05',
    'qingchunweimei': 'PIC06',
    'luanluntuqu': 'PIC07',
    'lingweitupian': 'PIC08'

}


class Fzfz9(object):
    """Crawler for one category of fzfz9.com image galleries.

    Images are saved under ``<save_root>/<category>/<date>/<title>/``.
    All network errors are handled best-effort: a failed page/set/image is
    logged and skipped, never aborting the whole crawl.
    """

    def __init__(self, category='yazhousetu'):
        """
        Args:
            category: one of the keys of ``CATEGORT_TO_INDEX``
                (e.g. 'yazhousetu', 'qingchunweimei', 'meituisiwa').

        Raises:
            ValueError: if ``category`` is not a known category.
        """
        # Validate with an explicit raise: ``assert`` is stripped under
        # ``python -O`` and would silently accept bad input.
        if category not in CATEGORT_TO_INDEX:
            raise ValueError('unknown category: {!r}'.format(category))
        self.homepage = HOMEPAGE
        self.category = category
        self.category_index = CATEGORT_TO_INDEX[category]
        self.res_encoding = 'utf8'

    def get_page_url(self, idx):
        """Return the listing-page URL for 1-based page index ``idx``."""
        url = '{}/{}/list_{}.html'.format(self.homepage, self.category_index, idx)
        return url

    def run(self, save_root, shuffle=False, start=1, end=133):
        """Crawl listing pages ``start`` .. ``end - 1`` (half-open range).

        Args:
            save_root: root directory for downloaded images.
            shuffle: if True, process each page's sets in random order.
            start, end: listing page range, as for ``range()``.
        """
        # Delegates to crawl_page so the per-page logic lives in one place.
        for page_index in range(start, end):
            self.crawl_page(page_index, save_root, shuffle=shuffle)

    def crawl_page(self, page_index, save_root, shuffle=False):
        """Crawl a single listing page: download every set it links to.

        Args:
            page_index: 1-based listing page number.
            save_root: root directory; images go under
                ``<save_root>/<category>/<date>/<title>/``.
            shuffle: if True, process this page's sets in random order.
        """
        save_folder = os.path.join(save_root, self.category)
        page_url = self.get_page_url(page_index)
        print('[PAGE]{}'.format(page_url))
        set_info = self.get_set_info(page_url)
        if shuffle:
            random.shuffle(set_info)
        for href, title, date in set_info:
            set_folder = os.path.join(save_folder, date, title)
            # exist_ok avoids the check-then-create race between workers.
            os.makedirs(set_folder, exist_ok=True)
            print('[SET]{}'.format(title))
            self.crawl_set(href, set_folder)

    def get_set_info(self, url):
        """Parse a listing page into a list of (set_url, title, date) tuples.

        Returns whatever was collected before any error (possibly empty).
        """
        set_info = []
        try:
            response = requests.get(url, headers=headers)
            response.encoding = self.res_encoding
            soup = BeautifulSoup(response.text, features='lxml')
            container = soup.find(attrs={'class': 'channel-list'})
            for item in container.find_all('dt'):
                # NOTE(review): assumes hrefs are site-relative paths; an
                # absolute href would yield a malformed URL here — verify
                # against the live markup.
                href = '{}/{}'.format(self.homepage, item.a['href'])
                title = item.a['title']
                date = item.a.i.text
                set_info.append((href, title, date))
        except Exception as e:
            # Best-effort crawl: log and return what we have so far.
            print('error: ' + str(e))
        return set_info

    def crawl_set(self, set_url, set_folder):
        """Download every image linked from a set page into ``set_folder``."""
        try:
            response = requests.get(set_url, headers=headers)
            soup = BeautifulSoup(response.text, features='lxml')
            img_content = soup.find(attrs={'class': 'content'})
            if img_content is None:
                return
            # Hoisted out of the loop: the target folder is loop-invariant.
            os.makedirs(set_folder, exist_ok=True)
            for item in img_content.find_all('a'):
                img_url = item['href']
                filename = os.path.basename(img_url)
                save_path = os.path.join(set_folder, filename)
                # Skip files already on disk so the crawl is resumable.
                if not os.path.exists(save_path):
                    print('[IMAGE]{}'.format(filename))
                    self.download_image(img_url, save_path)
        except Exception as e:
            # A failed set must not abort the whole crawl.
            print('set error: ' + str(e))

    def download_image(self, image_url, save_path):
        """Download one image to ``save_path``; log and skip on error."""
        # Downgrade to plain http. Rewrite ONLY the scheme prefix: the old
        # ``replace('https', 'http')`` corrupted any URL that happened to
        # contain 'https' elsewhere in its path.
        if image_url.startswith('https://'):
            image_url = 'http://' + image_url[len('https://'):]
        try:
            req = request.Request(image_url, headers=headers)
            data = request.urlopen(req).read()
        except Exception as e:
            print('download error: ' + str(e))
            return
        # Only write non-empty payloads; empty response means no image.
        if len(data) > 0:
            with open(save_path, 'wb') as f:
                f.write(data)
        # Small delay to throttle requests to the server.
        time.sleep(0.1)


def proc_func(crawler, page_indices, save_root):
    """Worker entry point: crawl each listing page in ``page_indices``."""
    for page_index in page_indices:
        crawler.crawl_page(page_index, save_root)


def run(save_root='/Users/liyu/Desktop/data/nsfw_crawl/fzfz9'):
    """Crawl the default category sequentially in this process.

    Args:
        save_root: destination root directory. Defaults to the previously
            hard-coded path so existing callers are unaffected.
    """
    crawler = Fzfz9()
    crawler.run(save_root)


def run_mp(nprocs=4):
    """Crawl all listing pages with ``nprocs`` worker processes.

    Pages 1..npages are split into contiguous chunks, one chunk per worker.

    Args:
        nprocs: number of worker processes to spawn.
    """
    crawler = Fzfz9('dongmantupian')
    npages = 133
    step = (npages + nprocs - 1) // nprocs  # ceil(npages / nprocs)
    save_root = '/Users/liyu/Desktop/data/nsfw_crawl/fzfz9'
    procs = []
    for worker in range(nprocs):
        start = worker * step
        stop = min(start + step, npages)
        # 1-based page indices for this worker's chunk. (The old code's
        # comprehension variable shadowed the outer loop variable ``i``.)
        indices = list(range(start + 1, stop + 1))
        proc = multiprocessing.Process(target=proc_func,
                                       args=(crawler, indices, save_root))
        proc.start()
        procs.append(proc)
    # Join so the parent doesn't return while children are still crawling.
    for proc in procs:
        proc.join()


if __name__ == '__main__':
    # run()  # sequential, single-process alternative
    # Multi-process crawl across 8 workers.
    run_mp(nprocs=8)
