# 爬虫---你爬取 别人公司网站数据
# 测试-- 你获取自己公司网站数据
import json
import unittest
import requests
# 单元测试
# class LoginTestCase(unittest.TestCase):
#
#     def test_login(self):
#         # 测试 生鲜 项目的 登录功能
#         login_url = 'http://127.0.0.1:8000/login/'
#         data = {
#             'username':'zs',
#             'password':'123456789',
#             'remembered':True
#         }
#         # 发测试请求
#         response = requests.post(login_url, json=data)
#
#         if response.status_code == 200:
#             if json.loads(response.content).get('code') != 0:
#                 raise Exception('登录失败了')
#         else:
#             raise Exception('连接失败！')
#         # 校验测试

# -- 你爬取 别人公司网站数据
def spider_baidu(out_path='zbaidu.html', timeout=10):
    """Fetch the Baidu homepage and save the raw response body to disk.

    :param out_path: file the page is written to, in binary mode
        (defaults to the original hard-coded 'zbaidu.html').
    :param timeout: seconds to wait for the server; without a timeout
        requests can block forever on a stalled connection.
    :raises requests.HTTPError: if the server answers with a 4xx/5xx status.
    """
    baidu_url = 'https://www.baidu.com/'

    # Browser-like User-Agent: many sites serve a stripped page to the
    # default python-requests agent.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36'
    }
    response = requests.get(baidu_url, headers=headers, timeout=timeout)
    # Fail loudly on an error status instead of silently saving an error page.
    response.raise_for_status()

    print('请求头', response.request.headers)

    # response.content is raw bytes -- write it verbatim. response.text
    # would re-decode with a guessed charset and can mangle the page.
    with open(out_path, 'wb') as f:
        f.write(response.content)

def spider_search(key_word=None):
    """Build and inspect the URL query string for a search request.

    Demonstrates urlencode/parse_qs round-tripping of a search term.
    The actual HTTP request is intentionally left commented out so the
    demo runs offline.

    :param key_word: search term; when None (the original behavior),
        prompt the user interactively.
    :return: the URL-encoded query string, e.g. 'q=python'.
    """
    from urllib.parse import parse_qs, urlencode

    if key_word is None:
        key_word = input('请输入想搜索的内容：')

    # Used only by the disabled request below; kept for reference.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36'
    }

    data = {
        'q': key_word
    }
    # dict -> 'q=...' and back, showing both directions of the conversion.
    result = urlencode(data)
    print(result)
    result_dict = parse_qs(result)
    print(result_dict)

    # Plain string: the original f-string had no placeholders.
    bai_url = 'https://www.so.com/s'

    # Disabled on purpose -- uncomment to actually issue the request.
    # response = requests.get(bai_url, params=data, headers=headers)
    #
    # with open('zbaidu.html', 'wb') as f:
    #     f.write(response.content)

    return result

# Script entry point: run the interactive query-string encoding demo.
if __name__ == '__main__':
    spider_search()