import csv
import time
import traceback
import requests
from bs4 import BeautifulSoup
import json
import pymysql
from time import sleep
import csv

from pyasn1.compat.octets import null


def _names_from_span(detail_info, class_name):
    """Return the ``title`` attributes of the <a> tags inside the span with
    the given CSS class, or ``['none']`` when the span is absent.

    Fixes the original bug where the fallback branch appended to a list
    that was never initialized (NameError) and used pyasn1's ``null``.
    """
    span = detail_info.find('span', class_=class_name)
    if span is None:
        return ['none']
    return [a['title'] for a in span.find_all('a')]


def _parse_movie(single, headers):
    """Build one movie's metadata dict from a listing card element.

    Fetches the movie's detail page (one extra HTTP request) for the
    year, director, cast, genre and district fields.
    """
    part_soup = BeautifulSoup(str(single), 'lxml')
    movie = {}
    # Title comes from the first anchor's title attribute.
    movie['电影名'] = part_soup.find('a')['title']
    # Rating lives in an <i> tag; fall back to a "no rating" marker when the
    # tag is missing or empty (original code raised NameError here).
    score_tag = part_soup.find('i')
    if score_tag is not None and score_tag.text:
        movie['评分'] = score_tag.text
    else:
        movie['评分'] = "1905暂无评分"
    path = part_soup.find('a', class_="pic-pack-outer")['href']
    movie['URL'] = path
    # Listing pages under /vod/list/ are all free-to-watch titles.
    movie['是否收费'] = "免费"

    detail = requests.get(url=path, headers=headers)
    detail.encoding = 'utf-8'
    detail_bs = BeautifulSoup(detail.text, 'html.parser')

    # Release year, e.g. "(2021)"; strip the surrounding parentheses only
    # when the span was actually found (the original mangled the fallback).
    title_div = detail_bs.find('div', class_="playerBox-info-title")
    year_span = title_div.find('span', class_="playerBox-info-year")
    if year_span is not None:
        movie['时间'] = year_span.text[1:-1]
    else:
        movie['时间'] = "未知年份"

    detail_info = detail_bs.find('ul', class_="playerBox-info-vodInfoList")
    movie['导演'] = _names_from_span(detail_info, 'playerBox-info-vodDirector')
    movie['主演'] = _names_from_span(detail_info, 'playerBox-info-vodMp')
    movie['类型'] = _names_from_span(detail_info, 'playerBox-info-vodType')
    movie['地区'] = _names_from_span(detail_info, 'playerBox-info-vodDistrict')
    return movie


def _write_csv(data_list, fieldnames):
    """Overwrite movies.csv with the rows accumulated so far.

    ``newline=''`` is required by the csv module to avoid blank rows on
    Windows; missing keys are written as empty cells by DictWriter.
    """
    with open('movies.csv', 'w', encoding='utf-8', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(data_list)


def get1905():
    """Scrape movie metadata from 1905.com's VOD listings and save it.

    Walks three list orderings — o3 (hottest), o4 (best rated), o1
    (newest) — at 99 pages each (24 movies per page), collecting for
    every movie: title, rating, URL, pricing, year, directors, cast,
    genres and districts. After each page the full snapshot is rewritten
    to movies.csv.

    Side effects: many HTTP GET requests, writes movies.csv, prints
    progress. Returns None.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36'
    }
    fieldnames = ['电影名', '评分', 'URL', '是否收费', '时间', '导演', '主演', '类型', '地区']
    data_list = []
    # The three orderings previously lived in three copy-pasted 90-line
    # loops; they differ only in the "oN" path segment.
    for order in ('o3', 'o4', 'o1'):
        for page in range(1, 100):
            url = f'https://www.1905.com/vod/list/n_1/{order}p{page}.html'
            response = requests.get(url=url, headers=headers)
            response.encoding = 'utf-8'
            soup = BeautifulSoup(response.text, 'html.parser')
            movie_cards = soup.find_all('div', class_="grid-2x grid-3x-md grid-6x-sm")
            for single in movie_cards:
                movie = _parse_movie(single, headers)
                data_list.append(movie)
                print(movie)
                sleep(1)  # throttle: one detail request per second
            # Persist after every page so a crash loses at most one page.
            _write_csv(data_list, fieldnames)
            print("---------------------------------------------")
    print('end')






    # 示例 data_list

# 调用函数

# def insert_1905():
#     cursor = None
#     conn = None
#     try:
#         count = 0
#         list = get1905()
#         print(f"{time.asctime()}开始插入1905电影数据")
#         conn, cursor = get_conn()
#         sql = "insert into movie1905 (id,name,score,path,state,director,actor,type,infoitem) values(%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#         for item in list:
#                 print(item)
#                 # 异常捕获，防止数据库主键冲突
#         try:
#             cursor.execute(sql, [0, item[0], item[1], item[2], item[3],item[4],item[5],item[6],item[7]])
#         except pymysql.err.IntegrityError:
#                 print("重复！跳过！")
#         conn.commit()  # 提交事务 update delete insert操作
#         print(f"{time.asctime()}插入1905电影数据完毕")
#     except:
#         traceback.print_exc()
#     finally:
#         close_conn(conn, cursor)
#     return;
#
# #连接数据库  获取游标
# def get_conn():
#     """
#     :return: 连接，游标
#     """
#     # 创建连接
#     conn = pymysql.connect(host= 'localhost',
#                     user="root",
#                     password="123456",
#                     db="movie1905",
#                     charset="utf8")
#     # 创建游标
#     cursor = conn.cursor()  # 执行完毕返回的结果集默认以元组显示
#     if ((conn != None) & (cursor != None)):
#         print("数据库连接成功！游标创建成功！")
#     else:
#         print("数据库连接失败！")
#     return conn, cursor
# #关闭数据库连接和游标
# def close_conn(conn, cursor):
#     if cursor:
#         cursor.close()
#     if conn:
#         conn.close()
#     return 1
#
# Script entry point: run the scraper only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
  get1905()
  # insert()


