import csv
import json
import os
import re
import time

import django
import jsonpath
import pandas as ps
import requests
from bs4 import BeautifulSoup
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangoProject.settings')
django.setup()
from myApp.models import Movies


# def spider(spiderTarget,start):
#     headers = {
#         'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
#         'Cookie':'ll="118188"; bid=twmV0v063fQ; _pk_id.100001.4cf6=5c45487387cdf85d.1741588925.; ap_v=0,6.0; __utmc=30149280; __utmz=30149280.1741588926.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __utmc=223695111; __utmz=223695111.1741588926.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __yadk_uid=wB26xbu7ckkfPs2tIq04uZaCljkBaMkM; _vwo_uuid_v2=D486AC901EE4EF2C1664918314A6A84C3|9d5add436c6bbe85c81e49a19378e874; __utma=30149280.1379719550.1741588926.1741588926.1741591753.2; __utma=223695111.549423848.1741588926.1741588926.1741591753.2'
#     }
#
#     params = {
#         'page_start':start
#     }
#
#     movieAllRes = requests.get(spiderTarget,params=params,headers=headers)
#     movieAllRes = movieAllRes.json()
#
#     moviesInformation = jsonpath.jsonpath(movieAllRes,'$.subjects')[0]
#     detailUrls = jsonpath.jsonpath(movieAllRes,'$.subjects..url')
#     print(moviesInformation)
#     for i,movieInformation in enumerate(moviesInformation):
#         resultData = {}
#         #评分
#         resultData['rate'] = movieInformation['rate']
#         #影片名
#         resultData['title'] = movieInformation['title']
#         #封面
#         resultData['cover'] = movieInformation['cover']
#         #详情页url
#         resultData['detailUrls'] = detailUrls[i]
#
#         detailUrlsRes = requests.get(detailUrls[i],headers=headers)
#         soup = BeautifulSoup(detailUrlsRes.text,'lxml')
#         #上映年份
#         resultData['year'] = re.findall('[(](.*?)[)]', soup.find('span',class_='year').get_text())[0]
#         #影片类型
#         types = soup.find_all('span',property='v:genre')
#         for i,span in enumerate(types):
#             types[i] = span.get_text()
#         resultData['type'] = ','.join(types)
#         #制片国家
#         country = soup.find_all('span',class_='pl')[4].next_sibling.strip().split(sep='/')
#         for i,span in enumerate(country):
#             country[i] = span.strip()
#         resultData['country'] = ','.join(country)
#         #语言
#         language = soup.find_all('span',class_='pl')[5].next_sibling.strip().split(sep='/')
#         for i,span in enumerate(language):
#             country[i] = span.strip()
#         resultData['language'] = ','.join(language)
#         #上映时间
#         upTimes = soup.find_all('span',property='v:initialReleaseDate')
#         upTimesStr = ''
#         for i in upTimes:
#             upTimesStr = upTimesStr + i.get_text()
#         upTime = re.findall('\d*-\d*-\d*',upTimesStr)[0]
#         resultData['time'] = upTime
#         #时长
#         if soup.find_all('span', property='v:runtime'):
#             resultData['movieTime'] = re.findall('\d+',soup.find('span', property='v:runtime').get_text())[0]
#         else:
#             resultData['movieTime'] = 0
#         #评论人数
#         resultData['comment_len'] = soup.find('span',property='v:votes').get_text()
#         #星星比例
#         starts=[]
#         startAll = soup.find_all('span',class_='rating_per')
#         for i in startAll:
#             starts.append(i.get_text())
#         resultData['starts'] = '.'.join(starts)
#         #影片简介
#         resultData['summary'] = soup.find('span', property='v:summary').get_text().strip()
#         #五条热评
#         comment_info = soup.find_all('span',class_='comment-info')
#         comments = [{} for x in range(5)]
#         for i,comment in enumerate(comment_info):
#             comments[i]['user'] = comment.contents[1].get_text()
#             comments[i]['start'] = re.findall('(\d*)', comment.contents[5].attrs['class'][0])[7]
#             try:
#                 comments[i]['time'] = comment.contents[7].attrs['title']
#             except:
#                 comments[i]['time'] = '2024-06-10 12:00:25'
#         contents = soup.find_all('span',class_='short')
#         for i in range(5):
#             comments[1]['comments'] = contents[i].get_text()
#         resultData['comments'] = json.dumps(comments)
#         #图片
#         imgList = []
#         lis = soup.select('.related-pic-bd  img')
#         for i in lis:
#             imgList.append(i['src'])
#         resultData['imgList'] = '.'.join(imgList)
#         #视频
#         if soup.find('a',class_='related-pic-video'):
#             movieUrl = soup.find('a',class_='related-pic-video').attrs['href']
#             foreshowMovieRes = requests.get(movieUrl,headers=headers)
#             foreshowMovieSoup = BeautifulSoup(foreshowMovieRes.text,'lxml')
#             movieUrl = foreshowMovieSoup.find('source').attrs['src']
#             resultData['movieUrl'] = movieUrl
#         else:
#             resultData['movieUrl'] = '0'
#         print(resultData)
#         result.append(resultData)
def spider(spiderTarget, start):
    """Fetch one page of Douban "hot movies" and scrape every detail page.

    For each movie the scraped record is printed, appended to the
    module-level ``result`` list, and written as one row to
    ./datasTwo.csv via save_to_csvTwo().

    :param spiderTarget: listing JSON endpoint (page_limit is part of the url)
    :param start: offset sent as the ``page_start`` query parameter
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
        'Cookie': 'll="118188"; bid=twmV0v063fQ; _pk_id.100001.4cf6=5c45487387cdf85d.1741588925.; ap_v=0,6.0; __utmc=30149280; __utmz=30149280.1741588926.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __utmc=223695111; __utmz=223695111.1741588926.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __yadk_uid=wB26xbu7ckkfPs2tIq04uZaCljkBaMkM; _vwo_uuid_v2=D486AC901EE4EF2C1664918314A6A84C3|9d5add436c6bbe85c81e49a19378e874; __utma=30149280.1379719550.1741588926.1741588926.1741591753.2; __utma=223695111.549423848.1741588926.1741588926.1741591753.2'
    }

    params = {
        'page_start': start
    }

    movieAllRes = requests.get(spiderTarget, params=params, headers=headers)
    movieAllRes = movieAllRes.json()

    moviesInformation = jsonpath.jsonpath(movieAllRes, '$.subjects')[0]
    detailUrls = jsonpath.jsonpath(movieAllRes, '$.subjects..url')
    print(moviesInformation)
    for i, movieInformation in enumerate(moviesInformation):
        time.sleep(5)  # throttle requests so Douban does not block the session
        resultData = {}
        # rating
        resultData['rate'] = movieInformation['rate']
        # title
        resultData['title'] = movieInformation['title']
        # cover image url
        resultData['cover'] = movieInformation['cover']
        # detail page url
        resultData['detailUrls'] = detailUrls[i]

        detailUrlsRes = requests.get(detailUrls[i], headers=headers)
        soup = BeautifulSoup(detailUrlsRes.text, 'lxml')
        # release year: text like "(2024)" -> "2024"
        resultData['year'] = re.findall(r'[(](.*?)[)]', soup.find('span', class_='year').get_text())[0]
        # genres
        types = [span.get_text() for span in soup.find_all('span', property='v:genre')]
        resultData['type'] = ','.join(types)
        # production country: 5th "pl" label's sibling text, slash-separated
        country = soup.find_all('span', class_='pl')[4].next_sibling.strip().split(sep='/')
        resultData['country'] = ','.join(part.strip() for part in country)
        # language: 6th "pl" label's sibling text, slash-separated
        language = soup.find_all('span', class_='pl')[5].next_sibling.strip().split(sep='/')
        resultData['language'] = ','.join(part.strip() for part in language)
        # release date: first yyyy-mm-dd match across all release-date spans
        upTimesStr = ''.join(span.get_text() for span in soup.find_all('span', property='v:initialReleaseDate'))
        resultData['time'] = re.findall(r'\d*-\d*-\d*', upTimesStr)[0]
        # runtime in minutes (0 when the page has no runtime span)
        if soup.find_all('span', property='v:runtime'):
            resultData['movieTime'] = re.findall(r'\d+', soup.find('span', property='v:runtime').get_text())[0]
        else:
            resultData['movieTime'] = 0
        # number of raters
        resultData['comment_len'] = soup.find('span', property='v:votes').get_text()
        # per-star rating percentages, joined with '.'
        starts = [span.get_text() for span in soup.find_all('span', class_='rating_per')]
        resultData['starts'] = '.'.join(starts)
        # synopsis
        resultData['summary'] = soup.find('span', property='v:summary').get_text().strip()
        # first two hot comments
        comment_info = soup.find_all('span', class_='comment-info')
        comments = [{} for _ in range(2)]
        # BUGFIX: only fill as many slots as exist -- the page usually carries
        # more comment-info spans than slots, which used to raise IndexError.
        for idx, comment in enumerate(comment_info[:len(comments)]):
            comments[idx]['user'] = comment.contents[1].get_text()
            # star level is encoded in a css class like "allstar40"
            comments[idx]['start'] = re.findall(r'(\d*)', comment.contents[5].attrs['class'][0])[7]
            try:
                comments[idx]['time'] = comment.contents[7].attrs['title']
            except Exception:
                # some comments lack a timestamp attribute; use a placeholder
                comments[idx]['time'] = '2024-06-10 12:00:25'
        contents = soup.find_all('span', class_='short')
        # BUGFIX: was range(5) over a 2-slot list -> guaranteed IndexError;
        # pair each comment slot with its text, bounded by both lengths.
        for idx in range(min(len(comments), len(contents))):
            comments[idx]['comments'] = contents[idx].get_text()
        resultData['comments'] = json.dumps(comments)
        # still images
        imgList = [img['src'] for img in soup.select('.related-pic-bd  img')]
        resultData['imgList'] = '.'.join(imgList)
        # trailer video url ('0' when the page offers none)
        if soup.find('a', class_='related-pic-video'):
            movieUrl = soup.find('a', class_='related-pic-video').attrs['href']
            foreshowMovieRes = requests.get(movieUrl, headers=headers)
            foreshowMovieSoup = BeautifulSoup(foreshowMovieRes.text, 'lxml')
            resultData['movieUrl'] = foreshowMovieSoup.find('source').attrs['src']
        else:
            resultData['movieUrl'] = '0'
        print(resultData)
        save_to_csvTwo(list(resultData.values()))
        result.append(resultData)


def save_to_csv(df):
    """Write the scraped batch to ./datas.csv (overwrites any existing file)."""
    output_path = './datas.csv'
    df.to_csv(output_path)

def save_to_csvTwo(rowData):
    """Append a single scraped record as one csv row to ./datasTwo.csv."""
    with open('./datasTwo.csv', 'a', newline='', encoding='utf-8') as out_file:
        csv.writer(out_file).writerow(rowData)

def main():
    """Scrape pages forever: one 20-movie page per iteration.

    Reads the next page number from ./pageNum.txt (last line), scrapes it,
    dumps the batch to ./datas.csv, then appends the incremented page
    number so a restart resumes where it left off.

    BUGFIX: the original recursed into main() after every page, which
    eventually raises RecursionError; an explicit loop has the same
    behavior without growing the call stack. The dead ``result.pop()``
    loop over an already-empty list was removed.
    """
    global result
    while True:
        result = []
        with open('./pageNum.txt', 'r') as fr:
            page = int(fr.readlines()[-1])
        print('开始爬取第%s个20' % page)
        spider(spiderTarget, page * 20)
        time.sleep(10)  # pause between pages to stay polite
        df = ps.DataFrame(result)
        save_to_csv(df)
        # record progress so the next iteration (or restart) continues here
        with open('./pageNum.txt', 'a') as fa:
            fa.write('\n' + str(page + 1))

def clear_csv():
    """Load ./datas.csv, drop empty and duplicate rows, return raw row values."""
    frame = ps.read_csv('./datas.csv')
    cleaned = frame.dropna().drop_duplicates()
    return cleaned.values

def save_to_sql():
    """Insert every cleaned csv row into the Movies table.

    Column order must match the order spider() builds resultData in.
    Rows that fail to insert (e.g. duplicates or malformed values) are
    reported and skipped rather than aborting the whole import.
    """
    data = clear_csv()
    for movie in data:
        try:
            Movies.objects.create(
                detailLink=movie[0],
                rate=movie[1],
                title=movie[2],
                cover=movie[3],
                year=movie[4],
                types=movie[5],
                country=movie[6],
                lang=movie[7],
                time=movie[8],
                movieTime=movie[9],
                comment_len=movie[10],
                starts=movie[11],
                summary=movie[12],
                comments=movie[13],
                imgList=movie[14],
                movieUrl=movie[15]
            )
        except Exception as exc:
            # BUGFIX: was a bare ``except: pass`` that also swallowed
            # KeyboardInterrupt/SystemExit; report the skipped row instead.
            print('save_to_sql: skipped row (%s)' % exc)



if __name__ == '__main__':
    print("开始")
    # Douban "hot movies" listing endpoint; page_limit is fixed at 20 here
    # and spider() adds the page_start offset. Read by main() as a global.
    spiderTarget = 'https://movie.douban.com/j/search_subjects?type=movie&tag=%E7%83%AD%E9%97%A8&sort=recommend&page_limit=20'
    # save_to_sql()
    main()
