import os

import requests
from bs4 import BeautifulSoup


class NeiHan(object):
    """Scraper for the kx1d.com comic listing page.

    Fetches the listing at *targetUrl*, extracts each entry's title and
    relative link, and (in ``download``) fetches each entry's detail page
    to save its images under ./download/.
    """

    # Site root; the listing page yields relative links that are joined to it.
    BASE_URL = "http://www.kx1d.com"

    def __init__(self, targetUrl):
        """:param targetUrl: absolute URL of the listing page to scrape."""
        super(NeiHan, self).__init__()
        self.__targetUrl = targetUrl
        # Browser-like User-Agent so the site does not reject the request.
        self.__header = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36'
        }

    def __getPageContent(self, url):
        """Fetch *url* and return its HTML text, or an empty document on failure."""
        try:
            # timeout so a dead server cannot hang the scraper forever
            response = requests.get(url, headers=self.__header, timeout=10)
        except requests.RequestException:
            return "<html></html>"
        if response.status_code != 200:
            return "<html></html>"
        response.encoding = "UTF-8"
        return response.text

    def __generater(self, url, func):
        """Fetch *url* and hand the HTML to *func*; return func's result."""
        return func(self.__getPageContent(url))

    def __parserPage(self, url, **kwargs):
        """Generic page parser.

        :param url: page to fetch.
        :keyword elementsFunc: callable(soup) -> iterable of elements to visit.
        :keyword itemFunc: callable(element) -> item dict.
        :yields: one item dict per element.
        """
        soup = BeautifulSoup(self.__getPageContent(url), 'lxml')
        itemFunc = kwargs["itemFunc"]
        for element in kwargs["elementsFunc"](soup):
            yield itemFunc(element)

    def __parserItemPrimary(self):
        """Yield {'title', 'link'} dicts for each entry on the listing page."""
        soup = BeautifulSoup(self.__getPageContent(self.__targetUrl), 'lxml')
        print(soup.head.title.text)
        for element in soup.find_all(class_="liL")[0].find_all("li"):
            # Fresh dict per item: yielding one shared, mutated dict would make
            # every stored item alias the last entry (bug in the original).
            yield {"title": element.a["title"], "link": element.a["href"]}

    def __parserItemDetail(self, url, title):
        """Download the images on the detail page at *url* as <title>.jpg."""
        soup = BeautifulSoup(self.__getPageContent(url), 'lxml')
        body = soup.find(class_="articleBody")
        if body is None:  # fetch failed or page layout changed
            return
        os.makedirs("./download", exist_ok=True)
        # Iterate <img> tags directly; the original walked the body's children
        # and crashed on text nodes, which have no .img attribute.
        for img in body.find_all("img"):
            src = img.get("src")
            if not src:
                continue
            ir = requests.get(src, headers=self.__header, timeout=10)
            # Context manager closes the file even if write fails
            # (the original leaked the file handle).
            # NOTE(review): every image of an entry is written to the same
            # file, so only the last survives — confirm this is intended.
            with open("./download/" + title + ".jpg", "wb") as out:
                out.write(ir.content)
            print(src)

    def download(self):
        """Scrape the listing and save every entry's images."""
        for item in self.__parserItemPrimary():
            self.__parserItemDetail(self.BASE_URL + item['link'], item['title'])

    def download2(self):
        """Same scrape, but with the parsers passed as callbacks to __generater."""
        def parserPrimary(page):
            soup = BeautifulSoup(page, 'lxml')
            print(soup.head.title.text)
            for element in soup.find_all(class_="liL")[0].find_all("li"):
                yield {"title": element.a["title"], "link": element.a["href"]}

        def parserDetail(page):
            soup = BeautifulSoup(page, 'lxml')
            body = soup.find(class_="articleBody")
            if body is None:
                return
            for img in body.find_all("img"):
                yield {"link": img["src"]}

        for detailPage in self.__generater(self.__targetUrl, parserPrimary):
            url = self.BASE_URL + detailPage['link']
            for content in self.__generater(url, parserDetail):
                print(content["link"])

    def download3(self):
        """Scrape the listing via the generic __parserPage and print titles."""
        def elementsFunc(soup):
            return soup.find_all(class_="liL")[0].find_all("li")

        def itemFunc(element):
            return {"title": element.a["title"], "link": element.a["href"]}

        for detailPage in self.__parserPage(self.__targetUrl,
                                            elementsFunc=elementsFunc,
                                            itemFunc=itemFunc):
            print(detailPage["title"])


if __name__ == "__main__":
    # Scrape the comic listing page and print each entry's title.
    target = "http://www.kx1d.com/neihanmanhua/"
    NeiHan(target).download3()
