import os
import random
import urllib.parse
import urllib.request


class UrllibDemo:
    """Small demos of HTTP crawling with urllib.

    Covers plain GET, custom/random User-Agent headers, URL encoding,
    a POST to the Youdao translation endpoint, and a simple Baidu Tieba
    page spider that saves each result page to disk.
    """

    def write_file(self, html, filename):
        """Save a server response body to a local disk file.

        html: response text to write
        filename: destination path on disk
        """
        print("正在存储" + filename)
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(html)

    def load_page(self, url, filename):
        """Send a GET request for *url* and return the decoded response body.

        url: URL to crawl
        filename: used only for the progress message
        """
        print("正在下载" + filename)
        headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;"}
        request = urllib.request.Request(url, headers=headers)
        # Context manager closes the connection deterministically instead of
        # leaking the socket (original code never called response.close()).
        with urllib.request.urlopen(request) as response:
            return response.read().decode()

    def tieba_spider(self, url, begin_page, end_page):
        """Fetch Tieba result pages begin_page..end_page (inclusive) and save
        each one under ./ext/cUrllib/<page>.html.

        url: base search URL (already carrying the kw= query string)
        begin_page: first page number to crawl (1-based)
        end_page: last page number to crawl
        """
        # Ensure the output directory exists; a fresh checkout would otherwise
        # raise FileNotFoundError on the first write_file() call.
        os.makedirs("./ext/cUrllib", exist_ok=True)
        for page in range(begin_page, end_page + 1):
            # Tieba paginates with pn = 0, 50, 100, ... (50 posts per page).
            pn = (page - 1) * 50
            filename = "./ext/cUrllib/" + str(page) + ".html"
            # Build the full URL for this page.
            fullurl = url + "&pn=" + str(pn)
            print(fullurl)
            # Fetch the HTML page, then persist it to disk.
            html = self.load_page(fullurl, filename)
            self.write_file(html, filename)

    def _test_urlopen(self):
        """Plain urlopen without an explicit Request object.

        ud = UrllibDemo()
        ud._test_urlopen()
        """
        with urllib.request.urlopen('http://www.baidu.com') as response:
            html = response.read()
        print('urlopen请求的结果: ', html)

    def _test_request(self):
        """GET via an explicit Request object.

        ud = UrllibDemo()
        ud._test_request()
        """
        request = urllib.request.Request('http://www.baidu.com')
        with urllib.request.urlopen(request) as response:
            html = response.read().decode()
        print('request请求的结果: ', html)

    def _test_request_params(self):
        """GET with a URL-encoded query string appended to the URL.

        ud = UrllibDemo()
        ud._test_request_params()
        """
        url = 'http://www.baidu.com/s'
        word = {'wd': '你好'}
        # urlencode percent-encodes the non-ASCII query value.
        word = urllib.parse.urlencode(word)
        newurl = url + '?' + word
        headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, "
                                 "like Gecko) Chrome/51.0.2704.103 Safari/537.36"}
        request = urllib.request.Request(newurl, headers=headers)
        with urllib.request.urlopen(request) as response:
            print('get方式请求拼接字符串的结果: ', response.read())

    def _test_useragent(self):
        """GET with a fixed User-Agent header.

        ud = UrllibDemo()
        ud._test_useragent()
        """
        url = 'http://www.baidu.com'
        ua_header = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;"}
        request = urllib.request.Request(url, headers=ua_header)
        with urllib.request.urlopen(request) as response:
            html = response.read()
        print('user-agent请求的结果: ', html)

    def _test_useragent_random(self):
        """GET with a User-Agent picked at random from a pool.

        ud = UrllibDemo()
        ud._test_useragent_random()
        """
        url = 'http://www.baidu.com'
        ua_list = [
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36',
        ]
        user_agent = random.choice(ua_list)
        request = urllib.request.Request(url)
        request.add_header('User-Agent', user_agent)
        # get_header expects the capitalized-first-letter form ('User-agent').
        print('增加user-agent请求的代理人: ', request.get_header('User-agent'))
        with urllib.request.urlopen(request) as response:
            html = response.read()
        print('增加user_agent请求的结果: ', html)

    def _test_header(self):
        """GET with an extra Connection header added after construction.

        ud = UrllibDemo()
        ud._test_header()
        """
        url = 'http://www.baidu.com'
        header = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;"}
        request = urllib.request.Request(url, headers=header)
        request.add_header("Connection", 'keep-alive')
        with urllib.request.urlopen(request) as response:
            print('增加header请求的响应码: ', response.code)
            html = response.read().decode()
        print('增加header请求的结果: ', html)

    def _test_urlencode(self):
        """Show urlencode / unquote round-tripping of a non-ASCII value.

        ud = UrllibDemo()
        ud._test_urlencode()
        """
        word = {'wd': '你好'}
        print('url编码: ', urllib.parse.urlencode(word))
        print('url编码还原字符串: ', urllib.parse.unquote('wd=%E4%BD%A0%E5%A5%BD'))

    def _test_tieba_baidu(self):
        """Interactive Baidu Tieba spider driver.

        ud = UrllibDemo()
        ud._test_tieba_baidu()
        """
        kw = input("请输入需要爬取的贴吧:")
        # Page bounds arrive as strings; convert to int for range().
        begin_page = int(input("请输入起始页："))
        end_page = int(input("请输入终止页："))
        url = "http://tieba.baidu.com/f?"
        key = urllib.parse.urlencode({"kw": kw})
        # Combined URL example: http://tieba.baidu.com/f?kw=lol
        url = url + key
        self.tieba_spider(url, begin_page, end_page)

    def _test_translate_youdao(self):
        """POST a form to the Youdao translation endpoint and print the reply.

        ud = UrllibDemo()
        ud._test_translate_youdao()
        """
        url = "http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule&smartresult=ugc&sessionFrom=null"
        headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML,"
                                 " like Gecko) Chrome/50.0.2661.102 Safari/537.36"}
        formdata = {
            "type": "AUTO",
            "i": "i love python",
            "doctype": "json",
            "xmlVersion": "1.8",
            "keyfrom": "fanyi.web",
            "ue": "UTF-8",
            "action": "FY_BY_ENTER",
            "typoResult": "true",
        }
        # Passing data= makes urlopen issue a POST; the body must be bytes.
        data = urllib.parse.urlencode(formdata)
        request = urllib.request.Request(url, data=data.encode(), headers=headers)
        with urllib.request.urlopen(request) as response:
            print(response.read())


if __name__ == '__main__':
    # Manual smoke test: POST a phrase to the Youdao translation endpoint.
    demo = UrllibDemo()
    demo._test_translate_youdao()