import requests
from bs4 import BeautifulSoup

# url = 'https://www.maoyan.com/films?showType=1'
url = "https://www.maoyan.com/cinemas?movieId=1446323"

# Maoyan city id; sent both as the `ci` query parameter and inside the
# Cookie header so the site returns cinemas for that city.
cityId = 50
params = {
    "ci": cityId
}

# Browser-like request headers so the scrape is not rejected outright.
# NOTE(review): the Cookie value is a captured, session-specific value
# (uuid, _csrf, Hm_* timestamps) — it will expire; refresh it from a live
# browser session when requests start failing.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    # Only advertise encodings that `requests` can always decode. The captured
    # browser value also listed `br` and `zstd`, which requests cannot
    # decompress unless the optional brotli/zstandard packages are installed —
    # leaving response.text as undecoded binary garbage.
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'Cookie': f'__mta=219076518.1714556028439.1714557316064.1714558014685.20; uuid_n_v=v1; uuid=E8F169D0079D11EF9F28D3BFFA06DA4913D0711C4AD14851AF91ED67A5B4570E; b-user-id=7faaae72-d90e-16a8-68da-41b717202699; _lxsdk_cuid=18f3380c4b1c8-07fe7e69767034-26001d51-13c680-18f3380c4b1c8; _lxsdk=E8F169D0079D11EF9F28D3BFFA06DA4913D0711C4AD14851AF91ED67A5B4570E; _csrf=6c1da934867c2c2bd554e3ac8ee5e0479bbb1d78b6e87929cd4ffe8ecb1e383d; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1714556028,1714788877; Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1714788882; __mta=219076518.1714556028439.1714558014685.1714788882251.21; _lxsdk_s=18f4161c443-b23-ec3-4e6%7C%7C4; ci={cityId}',
    'Host': 'www.maoyan.com',
    'Referer': url,
    'Sec-Fetch-Dest': 'document',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-Site': 'same-origin',
    'Sec-Fetch-User': '?1',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
    'sec-ch-ua': '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"'
}

# Fetch the cinema listing page. A timeout is essential: requests has no
# default timeout, so without one a stalled server hangs the script forever.
response = requests.get(url, params=params, headers=headers, timeout=10)

print(response.status_code)
print(response.text)


# Send the request:
# Call requests.get() on the URL, carrying the headers to disguise the
# client as a browser, and keep the returned data in a variable.
# res = requests.get(url=url, headers=headers, allow_redirects=True)
# res.encoding = 'utf-8'
# print(res.content)
# print(res.text)
# <Response [200]> is the response object; status code 200 means success.

# response = res.text
# print(res.text)

# print(html)

# Parse the page with the stdlib HTML parser (no extra dependency needed).
soup = BeautifulSoup(response.text, 'html.parser')

# Each movie title entry is a <div> whose class attribute is exactly
# "channel-detail movie-item-title" — presumably the listing card's title
# container; TODO(review) confirm against the live page markup.
movie_items = soup.find_all('div', class_='channel-detail movie-item-title')

# Iterate over every movie entry and print its details.
for item in movie_items:
    anchor = item.a
    # Guard: an entry without an <a> tag would otherwise crash with
    # AttributeError on `anchor.text` / `anchor['href']` — skip it instead.
    if anchor is None:
        continue

    # The div's own title attribute plus the link text and href.
    title_attribute = item.get('title')
    movie_name = anchor.text
    movie_link = anchor.get('href')

    # Print the extracted movie information.
    print(f"标题属性（Title Attribute）: {title_attribute}")
    print(f"电影名称（Movie Name）: {movie_name}")
    print(f"电影链接（Movie Link）: {movie_link}")
    print("----------------------------------")