#!/usr/bin/env python
# -*- coding: utf-8 -*-


# Method 2: use a requests Session to maintain the logged-in state automatically.
import requests
from urllib.parse import urljoin

BASE_URL = 'https://login2.scrape.center/'
LOGIN_URL = urljoin(BASE_URL, '/login')   # POST target for the login form
INDEX_URL = urljoin(BASE_URL, '/page/1')  # first listing page; requires login
USERNAME = 'admin'
PASSWORD = 'admin'

# Seconds to wait per request; without a timeout, requests can block forever
# on an unresponsive server.
TIMEOUT = 10

# A Session stores the cookies set by each response and re-sends them on
# every subsequent request, so the logged-in state persists across calls
# without any manual cookie handling.
session = requests.Session()

# Log in; the server replies with a session cookie that `session` captures.
response_login = session.post(LOGIN_URL, data={
    'username': USERNAME,
    'password': PASSWORD,
}, timeout=TIMEOUT)
# Fail fast if the login request itself errored (4xx/5xx) instead of
# silently continuing with an unauthenticated session.
response_login.raise_for_status()

cookies = session.cookies  # cookies captured from the login response
print('Cookies', cookies)

# Fetch a page that requires authentication; the stored session cookie is
# sent automatically, so no re-login is needed.
response_index = session.get(INDEX_URL, timeout=TIMEOUT)
# print(response_index.text)
print('Response Status', response_index.status_code)
print('Response URL', response_index.url)

# Save the authenticated page's HTML to a local file for inspection.
with open('film2.html', 'w', encoding='utf-8') as fp:
    fp.write(response_index.text)



