import urllib.request
import chardet

if __name__ == '__main__':
    # 0
    # url == Uniform Resource Locator
    # uri == Uniform Resource Identifier

    # 1
    # A crawler is a program that walks the nodes of the internet,
    # visiting one site after another to fetch the resources it needs.


    # 2
    # Never re-crawl the same url: if a page's content links back to
    # itself, the crawl would recurse forever.

    # 3
    # To keep a crawler (e.g. Baidu's) away from sensitive content,
    # create and edit a robots.txt file in the site root stating which
    # paths you do not want crawled.

    # 4
    # urllib.request.urlopen() returns an HTTPResponse instance,
    # which belongs to the http.client module.

    # 5
    # Opening an address that does not exist raises HTTPError.



    # Hands-on section
    # 0
    # NOTE(review): network I/O at import-guard level — requires connectivity.
    response = urllib.request.urlopen('http://www.fishc.com')
    # response = response[0:300]
    print(type(response))
    html = response.read()


    # Decoding step (left disabled in the original notes)
    # html = html.decode('utf-8')
    # html = html[0:300]

    # print(html)

    def checkCode():
        """Fetch the demo page, decode it, and print the Python type of the result."""
        reply = urllib.request.urlopen('http://www.fishc.com')
        page_text = reply.read().decode()
        print(type(page_text))


    # 1
    checkCode()


    def saveUrls():
        """Download every url listed in urls.txt (one per line) and save each
        page body to url_1.txt, url_2.txt, ...

        Fixes vs. the original:
        - `html` was read by chardet.detect() before it was ever assigned in
          the function body, so the first URL raised UnboundLocalError.
        - The non-utf-8 branch wrote raw bytes to a file opened in text mode,
          which raises TypeError; the body is now always decoded to str first.
        - The counter `len` shadowed the builtin; renamed to `index`.
        """
        try:
            with open('urls.txt') as f:
                index = 1
                for each in f:
                    response = urllib.request.urlopen(each)
                    html = response.read()

                    # Guess the page encoding from the raw bytes; fall back
                    # to utf-8 when chardet cannot tell.
                    encoding = chardet.detect(html)['encoding'] or 'utf-8'
                    text = html.decode(encoding, 'ignore')

                    with open('url_%d.txt' % index, 'w') as tf:
                        tf.write(text)
                    index += 1
        except IOError as e:
            print('error', e)


    def save2():
        """Fetch each url listed in urls.txt and write the page body to a
        numbered url_<n>.txt file, using the detected page encoding."""
        with open('urls.txt', 'r') as f:
            # urls.txt keeps one url per line; splitlines() drops the
            # trailing newlines so each entry is a clean address.
            url_list = f.read().splitlines()

            for idx, target in enumerate(url_list, start=1):
                reply = urllib.request.urlopen(target)
                print(reply.getcode())
                raw = reply.read()

                # Guess the page encoding from the raw bytes.
                charset = chardet.detect(raw)['encoding']
                if charset == 'GB2312':
                    # Widen to GBK (a superset of GB2312) to decode more pages.
                    charset = 'GBK'

                out_name = 'url_%d.txt' % idx
                with open(out_name, 'w', encoding=charset) as out_file:
                    out_file.write(raw.decode(charset, 'ignore'))


    save2()
