import urllib.request
import os

from selenium import webdriver
from pyquery import PyQuery as pq

# from . import website_base
import website_base

'''
Scraper for comics hosted on the "看漫画" (ikanman) website.
'''


class Ikanman(website_base.website_base):
    '''
    Scraper for comics hosted on the "看漫画" (ikanman) website.

    Chapter listings are parsed from the comic's index page with pyquery,
    per-page image URLs are collected by driving a headless PhantomJS
    browser through the site's reader, and the images themselves are
    fetched with urllib using the Referer header the site requires.
    '''

    def __init__(self, comic_no, save_file_path):
        '''
        Initialize the scraper.
        :param comic_no: comic identifier used in the site's URL scheme
        :param save_file_path: local directory the downloaded images are saved under
        '''
        website_url = "http://www.ikanman.com/"
        image_base_path = "{website}/comic/{comic_no}".format(website=website_url, comic_no=comic_no)
        website_base.website_base.__init__(self, website_url, image_base_path, save_file_path)

    def get_image_dict_from_html(self, image_base_html):
        '''
        Override of the base-class hook: extract every chapter's title,
        absolute URL and page count from the comic index page's HTML.
        :param image_base_html: HTML source of the comic's index page
        :return: list of dicts {"title", "href", "page_count"}, oldest chapter first
        '''
        result = []
        doc = pq(image_base_html)
        for link in doc("a.status0").items():
            # href begins with "/" and self.website_url ends with "/", so the
            # leading slash is dropped to avoid a double "//" when joining.
            # The page count is rendered like "16p" inside <span><i>; the
            # trailing letter is stripped before conversion.
            chapter = {
                "title": link.attr("title"),
                "href": self.website_url + link.attr("href")[1:],
                "page_count": int(link("span")("i").text()[:-1]),
            }
            print(chapter)
            result.append(chapter)
        # The site lists newest chapters first; reverse into reading order.
        result.reverse()
        return result

    def get_image_url(self, dire_list=None):
        '''
        Walk every chapter in the site's reader with a headless browser and
        record the image URL of each page.
        :param dire_list: chapter dicts as returned by get_image_dict_from_html
        :return: list of dicts {"image_url", "page", "cha", "referer"}
        '''
        print("-" * 8 + "正在获取漫画图片地址......" + "-" * 8)
        if dire_list is None:
            # The chapters are a sequence, not a mapping; default to an
            # empty list (the original used {}, which iterated the same way
            # but was misleading).
            dire_list = []
        result = []
        browser = webdriver.PhantomJS()
        try:
            for cha in dire_list:
                browser.get(cha["href"])
                for page in range(1, cha["page_count"] + 1):
                    print("收集第{0}章节的第{1}页的图片地址".format(cha["title"], page))
                    result.append({
                        "image_url": browser.find_element_by_id("mangaFile").get_attribute("src"),
                        "page": page,
                        "cha": cha["title"],
                        "referer": cha["href"],
                    })
                    # Advance the reader to the next page.
                    browser.find_element_by_id("next").click()
        finally:
            # Always shut the browser down, even if a page fails to load;
            # the original leaked the PhantomJS process on any exception.
            browser.quit()
        print("-" * 8 + "获取漫画图片地址结束" + "-" * 8)
        return result

    def download_image(self, image_object_list):
        '''
        Download every collected image to <save_file_path>/<chapter>/<page>.jpg.
        :param image_object_list: dicts as returned by get_image_url
        '''
        print("-" * 8 + "开始下载漫画......" + "-" * 8)
        headers = {
            'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64)",
            'Referer': ''
        }
        for image_object in image_object_list:
            # The site rejects image requests without a matching Referer.
            headers["Referer"] = image_object["referer"]
            req = urllib.request.Request(image_object["image_url"])
            for name in headers:
                req.add_header(name, headers[name])
            image_stream = urllib.request.urlopen(req).read()
            # os.path.join replaces the hard-coded "\\" separators, and
            # exist_ok collapses the exists()/makedirs() pairs.
            chapter_dir = os.path.join(self.save_file_path, image_object["cha"])
            os.makedirs(chapter_dir, exist_ok=True)
            # BUG FIX: the format string was "(unknown).jpg" with an unused
            # filename= keyword, so every page of a chapter overwrote one
            # file; each page is now named after its page number.
            file_name = os.path.join(chapter_dir, "{0}.jpg".format(image_object["page"]))
            with open(file_name, "wb") as fp:
                fp.write(image_stream)
            print("第{0}章的第{1}页已下载完成".format(image_object["cha"], image_object["page"]))
        print("-" * 8 + "下载完成" + "-" * 8)


if __name__ == "__main__":
    # Ad-hoc manual run: scrape comic 4389 into the given local directory.
    # download_cartoon is expected to be provided by the website_base parent.
    scraper = Ikanman("4389", "I:\\manhua_spider\\姐姐不可能那么可爱")
    scraper.download_cartoon()
