#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   extract_messages.py
@Contact :   xxzhang16@fudan.edu.cn

@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2022/3/17 20:09   zxx      1.0         None
'''

# import lib
import jsonlines
import imdb
import requests
from bs4 import BeautifulSoup
from threading import Thread

# class IPCrawler:
#     def __init__(self):
#         pass
#     def _get_hosts(self):
#         pass
#     def _check_hosts(self):
#         pass
#     def save_to_file(self):
#         pass
#     def load_hosts_file(self):
#         pass


class Crawler:
    """Scrape review and content information for a movie from imdb.com.

    Every request goes out through a proxy obtained from ``_get_host``,
    which is still an unfinished stub (see the commented-out ``IPCrawler``
    sketch above).
    """

    def __init__(self):
        pass

    def _get_host(self) -> str:
        """Return a proxy as a ``host:port`` string.

        TODO: not implemented. The original body returned the undefined
        name ``proxy`` and died with a NameError at the first call; fail
        loudly and explicitly instead.
        """
        raise NotImplementedError('proxy-pool lookup is not implemented yet')

    def _get_response(self, url) -> requests.Response:
        """GET ``url`` through a fresh proxy and return the decoded response.

        :param url: absolute URL to fetch
        :raises requests.HTTPError: if the server answers with a 4xx/5xx status
        """
        # get a new host from the proxy pool
        proxy = self._get_host()
        proxies = {
            'http': 'http://' + proxy,
            'https': 'https://' + proxy
        }

        # browser-like headers to avoid trivial bot blocking
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1.6) ",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Accept-Language": "en-us",
            "Connection": "keep-alive",
            "Accept-Charset": "GB2312,utf-8;q=0.7,*;q=0.7"
        }
        # get and pre-process the response
        response = requests.get(url, headers=headers, proxies=proxies)
        # raise instead of `assert` — asserts are stripped under `python -O`
        response.raise_for_status()
        response.encoding = response.apparent_encoding

        return response

    def extract_reviews(self, movie_id: str) -> dict:
        """Return the top user-rated review of movie ``tt<movie_id>``.

        Only the first (highest-rated) review card is parsed; ``None`` is
        returned when the page holds no review cards at all.
        """
        # reviews sorted by user rating, because high rates may be the most
        # suitable for recommendation conditions
        url = f'https://www.imdb.com/title/tt{movie_id}/reviews?spoiler=hide&sort=userRating&dir=desc&ratingFilter=0'
        # url = f'https://www.imdb.com/title/tt{movie_id}/reviews?spoiler=hide&sort=helpfulnessScore&dir=desc&ratingFilter=0'

        response = self._get_response(url)
        soup = BeautifulSoup(response.text, 'html.parser')

        # The original looped over every review card but returned on the
        # first iteration; take the first card directly instead.
        item = soup.find('div', class_='lister-item-content')
        if item is None:
            return None

        # rate given by the reviewer
        rate = item.find('span', class_='rating-other-user-rating').text.strip('\n')

        # review title and permalink
        title_info = item.find('a', class_='title')
        title = title_info.text
        review_link = 'https://www.imdb.com/' + title_info['href']

        # reviewer name and profile link
        user_info = item.find('span', class_='display-name-link')
        user_name = user_info.text
        user_link = 'https://www.imdb.com/' + user_info.find('a')['href']

        review_date = item.find('span', class_='review-date').text
        review = item.find('div', class_='text show-more__control').text

        return {
            'rate': rate,
            'title': title,
            'review_link': review_link,
            'user_name': user_name,
            'user_link': user_link,
            'review_date': review_date,
            'review': review
        }

    def extract_content_info(self, movie_id: str) -> dict:
        """Return introduction, aggregate rate and genre labels of a movie.

        NOTE(review): the two hashed CSS class names below are generated by
        IMDb's frontend build and break whenever the site is redeployed —
        verify they still match before relying on this method.
        """
        url = 'https://www.imdb.com/title/tt' + movie_id + '/'

        response = self._get_response(url)
        soup = BeautifulSoup(response.text, 'html.parser')

        # plot introduction
        introduction = soup.find('span', class_="sc-16ede01-1 kgphFu").text

        # aggregate user rating
        rate = soup.find('span', class_="sc-7ab21ed2-1 jGRxWM").text

        # genre / category chips
        labels = [item.text for item in soup.find_all('span', class_="ipc-chip__text")]

        return {
            'introduction': introduction,
            'rate': rate,
            'labels': labels
        }


class GetMoviesInfo:
    """Resolve IMDb ids for a corpus of movie names, in parallel threads.

    Reads ``{corpus_movie_id: movie_name}`` records from a jsonlines file,
    looks each name up through the ``imdb`` package, and writes the
    accumulated mapping back out as a single jsonlines record.
    """

    def __init__(self, movies_saved_pth, saved_file_pth, nums_thread):
        # jsonl file holding {corpus_movie_id: movie_name} records
        self.movies_saved_pth = movies_saved_pth
        # jsonl file the resolved mapping is written to
        self.saved_file_pth = saved_file_pth
        self.nums_thread = nums_thread
        self.movie_id_names_lst = []
        self.IMDb_instance = imdb.IMDb()
        # corpus_movie_id -> {"movieName": <imdb movie id or 'notFound'>}
        self.corpus_movie_id2contents = {}

        self._get_movie_names_lst()
        print(self.movie_id_names_lst)

    def _get_movie_info(self, movie_id):
        """
        Use requests to get information of the movie with ``movie_id``
        from the IMDb website.

        TODO: still a stub — the original body returned two undefined
        names and killed every worker thread with a NameError. Return
        empty placeholders so the id-resolution pipeline can run.

        :param movie_id: id for a specific movie in imdb
        :type movie_id: str
        :return: movie_intro, movie_reviews
        :rtype: str | None, list[str]
        """
        movie_intro, movie_reviews = None, []
        return movie_intro, movie_reviews

    def _get_one_movie_id(self, corpus_movie_id, movie_name) -> None:
        """Resolve one corpus movie name to an IMDb id and record it."""
        def remove_years_in_movie_name(_movie_name):
            # Split "Title (1999)" into ("Title", "1999"); year is None
            # when the name carries no parenthesised year.
            year = None
            if '(' in _movie_name:
                # use the LAST '(' in case the title itself contains one
                reverse_idx = _movie_name[::-1].index('(')
                left_bracket_pos = len(_movie_name) - reverse_idx - 1
            else:
                left_bracket_pos = len(_movie_name)
            movie_name_without_years = _movie_name[:left_bracket_pos].strip(' ')

            if ')' in _movie_name:
                reverse_right_bracket_idx = _movie_name[::-1].index(')')
                right_bracket_pos = len(_movie_name) - reverse_right_bracket_idx - 1
                year = _movie_name[left_bracket_pos + 1: right_bracket_pos]
            return movie_name_without_years, year

        movie_id = 'notFound'

        try:
            movies_obj = self.IMDb_instance.search_movie(movie_name)
            movie_name_without_years, year_from_name = remove_years_in_movie_name(
                movie_name)
            # Cross-check each candidate's release year against the year
            # embedded in the corpus movie name.
            for m in movies_obj:
                if year_from_name is None:
                    break
                try:
                    year = self.IMDb_instance.get_movie(m.movieID).data['year']
                except KeyError:
                    year = self.IMDb_instance.get_movie(m.movieID)['year']
                # int() instead of eval(): eval on scraped text is unsafe and
                # crashed the thread (NameError) on non-numeric content.
                try:
                    if int(year_from_name) == year:
                        movie_id = m.movieID
                        break
                except ValueError:
                    # year text in the name is not numeric; cannot verify
                    break
        except IndexError:
            movie_id = 'notFound'

        # NOTE(review): the IMDb *id* is stored under a key named
        # "movieName" — looks misnamed; confirm with downstream consumers
        # before renaming (the key is part of the output file format).
        self.corpus_movie_id2contents[corpus_movie_id] = {
            "movieName": movie_id,
        }

        # Todo list by xansar:
        # design a function for crawling information from imdb
        self._get_movie_info(movie_id)  # result currently discarded

        print(movie_name)

    def _slice_get_movie_ids(self, start, stop) -> None:
        """Thread worker: resolve ids for movie_id_names_lst[start:stop]."""
        i = start
        while i < stop:
            corpus_movie_id, movie_name = self.movie_id_names_lst[i]
            try:
                self._get_one_movie_id(corpus_movie_id, movie_name)
            except imdb._exceptions.IMDbDataAccessError:
                # Transient access error: retry the same index. (The old
                # `i = max(start, i - 1)` then `i += 1` never retried the
                # first element of the slice.)
                continue
            i += 1

    def _get_movie_names_lst(self):
        """Load (corpus_movie_id, movie_name) pairs not yet resolved.

        NOTE(review): each jsonl record REPLACES the list rather than
        extending it — correct only if the file holds a single record,
        which is how write_to_jsonl/merge_movies produce it; verify.
        """
        with jsonlines.open(self.movies_saved_pth) as reader:
            for obj in reader:
                self.movie_id_names_lst = [(k, obj[k]) for k in obj.keys(
                ) if k not in self.corpus_movie_id2contents.keys()]

    def get_movies_id(self):
        """Split the work list into nums_thread slices and resolve each on
        its own daemon thread, blocking until all of them finish."""
        length = len(self.movie_id_names_lst)
        print(length)
        # slice boundaries: nums_thread evenly spaced start points + the end
        points_lst = [int(length / self.nums_thread * i)
                      for i in range(self.nums_thread)]
        points_lst.append(length)

        threads = []
        for i in range(self.nums_thread):
            t = Thread(target=self._slice_get_movie_ids,
                       args=(points_lst[i], points_lst[i + 1]))
            # modern spelling of the deprecated t.setDaemon(True)
            t.daemon = True
            threads.append(t)
        for t in threads:
            t.start()
        for t in threads:
            t.join()

    def write_to_jsonl(self):
        """Persist the accumulated mapping as one jsonlines record."""
        with jsonlines.open(self.saved_file_pth, mode='w') as writer:
            writer.write(self.corpus_movie_id2contents)


def merge_movies(data_pth) -> dict:
    """Union the 'movieMentions' dicts of all records in a jsonlines file."""
    merged = {}
    with jsonlines.open(data_pth) as reader:
        for record in reader:
            merged.update(record['movieMentions'])
    return merged


if __name__ == '__main__':
    # One-off preprocessing kept for reference: merges the movieMentions
    # of each data split into a per-split *_movies.jsonl file.
    """
    data_category_lst = ['train', 'valid', 'test']
    data_pth_lst = [
        f"D:\\documents\\KGSF-master\\data\\{c}_data.jsonl" for c in data_category_lst
        ]
    movies_saved_pth_lst = [
        f"D:\\documents\\KGSF-master\\data\\{c}_movies.jsonl" for c in data_category_lst
        ]
    tmp_dict = {}
    for i in range(len(data_pth_lst)):
        with jsonlines.open(movies_saved_pth_lst[i] , mode='w') as writer:
            movies = merge_movies(data_pth_lst[i])
            tmp_dict.update(movies)
            writer.write(movies)
    print(len(tmp_dict))
    """
    # Resolve IMDb ids for the test split and persist the mapping.
    movies_info = GetMoviesInfo(
        movies_saved_pth='D:\\documents\\KGSF-master\\data\\test_movies.jsonl',
        saved_file_pth='D:\\documents\\KGSF-master\\data\\data_with_reviews\\test_movies.jsonl',
        nums_thread=16)
    movies_info.get_movies_id()
    movies_info.write_to_jsonl()
