import requests
from lxml import etree
import time
import random
from urllib import parse
import demo02
import os

class TiebaSpider:
    """Simple image scraper.

    Fetches one HTML page, extracts every ``<img src>`` value via XPath,
    and downloads each image into a local ``image/`` directory.
    """

    def __init__(self):
        # Dated IE user-agent so requests look like a plain browser.
        self.headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)'}

    def get_html(self, url):
        """Return the decoded HTML text of *url*.

        A timeout is set so a stalled server cannot hang the crawl
        forever (the original call had none).
        """
        return requests.get(url=url, headers=self.headers, timeout=10).text

    def xpath_func(self, html, xpath_bds):
        """Apply the XPath expression *xpath_bds* to *html*; return the match list."""
        p = etree.HTML(html)
        return p.xpath(xpath_bds)

    def parse_html(self, onr_url):
        """Fetch *onr_url*, collect all <img src> URLs and download each image."""
        onr_html = self.get_html(onr_url)

        xpath_bds = '//img/@src'
        href_list = self.xpath_func(onr_html, xpath_bds)
        print(len(href_list))

        for href in href_list:
            # Fix: the original blindly prepended 'http:', which corrupts
            # src values that already carry a scheme or are page-relative.
            # urljoin handles protocol-relative ('//host/x.jpg'), absolute,
            # and relative srcs alike.
            img_url = parse.urljoin(onr_url, href)
            self.down_image(img_url)

    def down_image(self, img_url):
        """Download one image to image/<basename>, creating the directory on demand."""
        content = requests.get(url=img_url, headers=self.headers, timeout=10).content
        # Fix: the original used img_url[-8:], which can contain '/' or
        # query characters (open() would fail) and collides easily.
        # Use the basename of the URL path instead.
        path = parse.urlsplit(img_url).path
        filename = os.path.basename(path) or 'unnamed.jpg'
        # exist_ok avoids the check-then-create race of exists() + mkdir().
        os.makedirs('image', exist_ok=True)
        with open(os.path.join('image', filename), 'wb') as f:
            f.write(content)
        print(filename, '下载成功')

    def run(self):
        """Interactive entry point: prompt for a URL and crawl its images."""
        url = input('请输入网址:')
        self.parse_html(url)
        print('hello world')

if __name__ == '__main__':
    # Script entry point: build the spider and start the interactive crawl.
    TiebaSpider().run()


