from threading import Thread, active_count
import requests as r
import traceback
import sys
import time
import re
import numpy as np
import random
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from lxml import etree
import thread

r.packages.urllib3.disable_warnings(InsecureRequestWarning)


class Downloader:
    """Multi-threaded HTTP range downloader for animefreak.tv videos.

    Splits the file into ``num`` byte ranges, downloads each range in its own
    thread (with adaptive connect/read timeouts derived from observed
    latencies), then merges the parts into ``output``.
    """

    # Characters illegal in Windows filenames are mapped to a space;
    # a double quote becomes a single quote instead.
    _FILENAME_SANITIZER = str.maketrans({
        '<': ' ', '>': ' ', '|': ' ', ':': ' ', '*': ' ',
        '?': ' ', '\\': ' ', '/': ' ', '"': "'",
    })

    def __init__(self, play_label_url, num, output):
        """
        :param play_label_url: URL of the play page (NOT the raw video URL)
        :param num: number of download worker threads
        :param output: output file name (sanitized for the filesystem)
        """
        self.url = self.get_video_url(play_label_url)
        self.num = num
        self.output = output.translate(self._FILENAME_SANITIZER)
        self.content_length = -1            # total size in bytes, set in run()
        self.data = {key: b'' for key in range(num)}  # per-thread downloaded data
        self.CONNECT_TIMEOUT = 15
        self.READ_TIMEOUT = 8
        self.connect_time_list = [15, 15]   # samples used to adapt CONNECT_TIMEOUT
        self.read_time_list = [8, 8]        # samples used to adapt READ_TIMEOUT
        self.active = []        # each worker appends a 1 here when it finishes

        self.percent = 0.00

        self.threads = []
        self.var_main_thread = None

    def get_video_url(self, play_url):
        """
        Scrape the real video file URL out of a play page.
        :param play_url: play-page URL, e.g.
            'https://www.animefreak.tv/watch/charlotte/episode/episode-2'
        :return: direct URL of the video file
        :raises IndexError: if the page contains no ``var file = "...";`` line
        """
        video_url = re.findall('var file = "(.*?)";\r\n', r.get(play_url, verify=False).text)[0]

        return video_url

    def run(self):
        """Start all worker threads, report progress, then merge the parts."""
        print('\n')
        print('*' * 50)
        print('\n')
        print('[*] Download request received.')
        print('[+] VIDEO_URL = %s' % self.url)
        print('    THREAD_NUM = %d' % self.num)
        print('    OUTPUT_FILE = %s' % self.output)

        head = r.head(self.url, verify=False)

        content_length = int(head.headers['Content-Length'])  # size of all data
        print('[*] CONTENT_LENGTH = %d' % content_length)

        # Bytes each thread downloads; the last thread also takes the remainder.
        part_size = content_length // self.num
        self.content_length = content_length

        for key in range(self.num - 1):
            # Byte range for every worker except the last one.
            worker = Thread(target=self.download,
                            args=(key, part_size * key, part_size * (key + 1) - 1),
                            daemon=True)
            self.threads.append(worker)
            worker.start()
            sys.stdout.write('\r[*] %d threads started. (%d in total)' % (key + 1, self.num))

        # The last worker covers everything up to the end of the file.
        key = self.num - 1
        worker = Thread(target=self.download,
                        args=(key, part_size * key, content_length - 1),
                        daemon=True)
        self.threads.append(worker)
        worker.start()
        sys.stdout.write('\r[*] %d threads started. (%d in total)' % (key + 1, self.num))

        print('\n[*] Downloading...')

        self.var_main_thread = Thread(target=self.main_thread, daemon=True)
        self.var_main_thread.start()

        for thread_ in self.threads:
            thread_.join()

        # The progress reporter loops forever; kill it once every worker is done.
        thread.stop_thread(self.var_main_thread)
        print('\n[*] Download completed')
        print('[*] Merging...')
        with open(self.output, 'wb') as f:
            for key in range(self.num):
                f.write(self.data[key])

        print('[*] Completed.')

    def download(self, key, start_size, end_size):
        """
        Download bytes ``start_size``..``end_size`` (inclusive) into self.data[key].

        Retries forever: on any failure the partial data for this range is
        discarded and the whole range is requested again, with the extra
        timeout allowances widened on each attempt.
        """
        read_extra_timeout = 0
        connect_extra_timeout = 0

        while 1:
            try:
                if read_extra_timeout >= 16:
                    read_extra_timeout = 2  # cap runaway growth of the read allowance

                start_connect_time = time.time()  # timestamp when the connection attempt starts
                response = r.get(self.url, headers={"Accept": "*/*",
                                                    "Accept-Encoding": "identity;q=1, *;q=0",
                                                    "Accept-Language": "zh-CN,zh;q=0.9,ja;q=0.8",
                                                    "Connection": "keep-alive",
                                                    "Host": "st11.anime1.com",
                                                    'Range': 'bytes={}-{}'.format(start_size, end_size),
                                                    "Referer": "https://www.animefreak.tv/",
                                                    "Sec-Fetch-Dest": "video",
                                                    "Sec-Fetch-Mode": "no-cors",
                                                    "Sec-Fetch-Site": "cross-site",
                                                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                                                                  "AppleWebKit/537.36 (KHTML, like Gecko) "
                                                                  "Chrome/85.0.4183.83 Safari/537.36",
                                                    },
                                 stream=True, timeout=3 + connect_extra_timeout + self.CONNECT_TIMEOUT, verify=False)
                self.connect_time_list.append(time.time() - start_connect_time)

                start_read_time = time.time()
                for data in response.iter_content(chunk_size=int(1024 * 1024 * 0.2)):
                    self.data[key] += data
                    self.read_time_list.append(time.time() - start_read_time)
                    if time.time() - start_read_time >= 2 + read_extra_timeout + self.READ_TIMEOUT:
                        raise RuntimeError  # a chunk took too long -> restart this range
                    # The per-chunk durations feed the adaptive READ_TIMEOUT
                    # computed in main_thread().
                    start_read_time = time.time()

                self.active.append(1)   # signal completion to the progress thread
                return 0

            except RuntimeError:  # a chunk read exceeded the allowance
                self.data[key] = b''
                read_extra_timeout += 1

            except r.exceptions.ReadTimeout:  # socket-level read timeout
                connect_extra_timeout += 2
                self.data[key] = b''
                self.connect_time_list.append(self.CONNECT_TIMEOUT + connect_extra_timeout + 3)

            except r.exceptions.ConnectTimeout:  # connection timed out
                self.data[key] = b''
                self.connect_time_list.append(self.CONNECT_TIMEOUT + connect_extra_timeout + 3)
                connect_extra_timeout += 2

            except r.exceptions.ConnectionError:  # connection dropped/refused
                self.data[key] = b''
                self.connect_time_list.append(self.CONNECT_TIMEOUT + connect_extra_timeout + 3)
                connect_extra_timeout += 2

            except Exception:  # anything else: log and retry (was a bare except,
                               # which also swallowed SystemExit/KeyboardInterrupt)
                self.data[key] = b''
                print(traceback.format_exc())

    def main_thread(self):
        """
        Progress reporter: prints status to the console and adapts
        READ_TIMEOUT / CONNECT_TIMEOUT to the observed mean durations.
        Runs until killed by ``thread.stop_thread`` in run().
        :return: None
        """
        while 1:
            self.READ_TIMEOUT = float(np.mean(self.read_time_list))
            self.CONNECT_TIMEOUT = float(np.mean(self.connect_time_list))

            # len() gives the actual number of payload bytes; sys.getsizeof
            # would add per-object overhead and overstate progress.
            data_size = 0
            for key in self.data:
                data_size += len(self.data[key])

            self.percent = data_size / self.content_length

            sys.stdout.write(
                '\rdownloading... > %d%% [%s%s] (%d threads active, %d in total) READ_TIMEOUT=%0.2f  CONNECT_TIMEOUT=%0.2f' % (
                    int(self.percent * 100),
                    '#' * int(self.percent * 20),
                    ' ' * (20 - int(self.percent * 20)),
                    self.num - len(self.active),  # workers that have not finished yet
                    self.num,
                    self.READ_TIMEOUT,
                    self.CONNECT_TIMEOUT
                ))

            time.sleep(0.03)

    def start(self):
        """Kick off the download (blocking)."""
        self.run()


class SearchByKeyword:
    """Query animefreak.tv's top-search endpoint and compile the results."""

    def __init__(self, keyword):
        """
        :param keyword: search term typed by the user
        """
        self.keyword = keyword

        # Passing the keyword via ``params`` lets requests URL-encode it, so
        # keywords containing spaces, '&', '#' or non-ASCII text no longer
        # corrupt the query string (the old '%s' interpolation sent it raw).
        self.search_anime_info_dict = r.get(
            'https://www.animefreak.tv/search/topSearch',
            params={'q': keyword},
        ).json()
        # Response shape (abridged sample):
        # {
        #     "status": 1,                              # JSON.status
        #     "data": [                                 # JSON.data
        #         {"rel": 17.759033203125,              # JSON.data[0].rel
        #          "anime_id": 8374,
        #          "name": "Lord El-Melloi II Sei no Jikenbo: ...",
        #          "has_image": 1,
        #          "seo_name": "lord-el-melloi-ii-...",
        #          "score_count": 38,
        #          "score": 8.35,
        #          "aired": 1562442554,
        #          "episodes": [
        #              {"episode_id": 144323,           # ...episodes[0].episode_id
        #               "episode_seo_name": "episode-13",
        #               "episode_name": "Episode 13"}
        #          ]},
        #         ...
        #     ]
        # }
        # Cover image URL pattern:
        # https://www.animefreak.tv/meta/anime/{anime_id}/{seo_name}.jpg

        # Compiled per-series info dicts.
        self.anime_info_complied_list = [
            self._compile_anime_info(entry)
            for entry in self.search_anime_info_dict['data']
        ]

    @staticmethod
    def _compile_anime_info(raw):
        """
        Map one raw search entry to the compiled info dict.
        :param raw: one element of JSON.data (see sample above)
        :return: {'name', 'seo_name', 'update_to', 'img'}
        """
        return {
            'name': raw['name'],
            'seo_name': raw['seo_name'],
            # episodes[0] appears to be the most recently listed episode —
            # matches the sample payload above.
            'update_to': raw['episodes'][0]['episode_name'],
            'img': f'https://www.animefreak.tv/meta/anime/'
                   f'{raw["anime_id"]}/{raw["seo_name"]}.jpg'
        }


class HomePage:
    """Scraper for the animefreak.tv front page."""

    def __init__(self):
        # Parsed HTML tree of the home page.
        self.home_page_HTML = etree.HTML(r.get('https://www.animefreak.tv/').text)
        # Pre-fetched info about one random series (refresh via get_random_anime()).
        self.random_anime_info = anime_list.get_random_anime_info()

    def get_latest_updates(self):
        """
        Collect the "latest updates" strip from the home page.
        :return: [{
                'series_name': ...,
                'image': ...,
                'episode_name': ...,
                'update_time': ...
            }, ...]
        """
        tree = self.home_page_HTML
        images = tree.xpath('/html/body/div[2]/div[3]/div/div[2]/div/a/img')
        episode_links = tree.xpath('/html/body/div[2]/div[3]/div/div[2]/div/div/a[1]')
        series_links = tree.xpath('/html/body/div[2]/div[3]/div/div[2]/div/div/a[2]')
        timestamps = tree.xpath('/html/body/div[2]/div[3]/div/div[2]/div/div/div')
        # Each xpath result is a list of <Element ...> nodes.

        # One entry per cover image: the number of listed shows matches the
        # number of images.
        latest = []
        for index, image in enumerate(images):
            latest.append({
                'series_name': series_links[index].text,
                'image': image.attrib['src'],
                'episode_name': episode_links[index].text,
                'update_time': timestamps[index].text,
            })

        return latest

    def get_random_anime(self):
        """Refresh and return the info dict of a randomly chosen series."""
        self.random_anime_info = anime_list.get_random_anime_info()
        return self.random_anime_info

    def get_new_anime(self):
        """
        Collect the "new anime" panel from the home page.
        :return: {
            'name': series_name,
            'genres': genres,
            'href': href,
            'cover': cover
        }  — four parallel lists
        """
        tree = self.home_page_HTML
        cover = tree.xpath(
            '/html/body/div[2]/div[4]/div/div[1]/div/div/div/div/div/a[1]/img/attribute::src'
        )
        series_name = tree.xpath(
            '/html/body/div[2]/div[4]/div/div[1]/div/div/div/div/div/a[2]/text()'
        )
        href = tree.xpath(
            '/html/body/div[2]/div[4]/div/div[1]/div/div/div/div/div/a[2]/attribute::href'
        )

        # Genres live in a per-entry container, so each slot needs its own
        # positional XPath query; every genre is joined as 'Genre, '.
        genres = []
        for slot in range(len(cover)):
            entry_genres = tree.xpath(
                f'/html/body/div[2]/div[4]/div/div[1]/div/div/div/div[{slot+1}]/div/div[1]/a/text()'
            )
            genres.append(''.join(f'{genre}, ' for genre in entry_genres))

        return {
            'name': series_name,
            'genres': genres,
            'href': href,
            'cover': cover
        }


class View:
    def __init__(self, view_url):
        """
        Wrap a single series preview page.
        :param view_url: URL of the preview page,
            e.g. https://www.animefreak.tv/watch/clannad
        """
        self.view_url = view_url  # URL of the preview page
        self.view_HTML = etree.HTML(r.get(self.view_url).text)
        # https://www.animefreak.tv/watch/clannad
        # print(self.view_url)

        # DEBUG  CODE
        # import requests as r;from lxml import etree;view_url = "https://www.animefreak.tv/watch/clannad";view_HTML = etree.HTML(r.get(view_url).text)
        # view_url = "https://www.animefreak.tv/watch/clannad"  # preview page URL
        # view_HTML = etree.HTML(r.get(view_url).text)
        #

    def get_anime_info(self):
        """
        Scrape everything about the series from the preview page.
        :return: {
            'name': name,
            'description': description,          # synopsis re-wrapped to <= 60-char lines
            'genres': genres,                    # 'Genre1, Genre2, ' string
            'rating': rating,
            'status': status,
            'type': type,
            'first_aired': first_aired,
            'episodes_info': list(zip(episodes_name, episodes_release_time, episodes_href)),       # ordered from the highest episode number down
            'cover': cover
        }
        """
        name = self.view_HTML.xpath('/html/body/div[2]/div[4]/div/div[1]/div[2]/div')[0].text  # series title
        _description = self.view_HTML.xpath('/html/body/div[2]/div[4]/div/div[1]/div[2]/p[1]/text()')  # synopsis text nodes
        # The synopsis may be preceded by whitespace nodes: skip leading nodes
        # that are exactly one space, keep everything from the first real one.
        # NOTE(review): if _description is empty or all-space, description
        # stays '' and description[0] below raises IndexError — confirm every
        # preview page carries a synopsis.
        description = ''
        for index in range(len(_description)):
            if _description[index] == ' ':
                pass
            else:
                description = _description[index:]
                break

        # Strip the page's layout padding, then split into words.
        description_ = description[0].replace('\r\n                    ', '').replace('\r\n                ', '').replace('\n', '').split(' ')
        description = []

        # Re-wrap the synopsis into lines no longer than 60 characters.
        sentence = ''
        for word in description_:   # walk each word, keeping line length bounded
            if len(sentence + ' ' + word) <= 60:
                sentence += f' {word}'
            else:
                description.append(sentence)
                sentence = word

        if sentence:  # flush the last partial line
            description.append(sentence)

        genres = ''.join(
            [(i.text + ', ') for i in self.view_HTML.xpath('/html/body/div[2]/div[4]/div/div[1]/div[4]/div[2]/a')])

        rating = self.view_HTML.xpath('/html/body/div[2]/div[4]/div/div[1]/div[4]/div[4]/text()')[1].\
            replace('\r', '').replace('\n', '')

        status = self.view_HTML.xpath('/html/body/div[2]/div[4]/div/div[1]/div[4]/div[5]/text()')[1].\
            replace('\r', '').replace('\n', '')

        type = self.view_HTML.xpath('/html/body/div[2]/div[4]/div/div[1]/div[4]/div[6]/a/text()')[0]

        first_aired = self.view_HTML.xpath('/html/body/div[2]/div[4]/div/div[1]/div[4]/div[7]/text()')[1].\
            replace('\r', '').replace('\n', '')

        episodes_name_ = self.view_HTML.xpath('/html/body/div[2]/div[4]/div/div[3]/div[1]/div/div[2]/ul/li/a/text()')
        episodes_name = []
        for item in episodes_name_:  # scraped text is padded with layout whitespace; strip it
            episodes_name.append(
                item.replace('\r\n\r\n                                      ', '')
                    .replace('\r\n\r\n                                ', '')
            )

        episodes_release_time_ = self.view_HTML.xpath(
            '/html/body/div[2]/div[4]/div/div[3]/div[1]/div/div[2]/ul/li/text()')
        episodes_release_time = []
        for item in episodes_release_time_:  # scraped text is padded with layout whitespace; strip it
            if item == '\r\n                                ':
                continue
            else:
                episodes_release_time.append(
                    item.replace('\r\n                                \r\n                                ', '')
                        .replace('\r\n                                \r\n                            ', '')
                )
        episodes_href = self.view_HTML.xpath('/html/body/div[2]/div[4]/div/div[3]/div[1]/div/div[2]/ul/li/a/attribute::href')
        cover = self.view_HTML.xpath('/html/body/div[2]/div[4]/div/div[1]/div[1]/img')[0].attrib['src']

        return {
            'name': name,
            'description': description,
            'genres': genres,
            'rating': rating,
            'status': status,
            'type': type,
            'first_aired': first_aired,
            'episodes_info': list(zip(episodes_name, episodes_release_time, episodes_href)),  # ordered from the highest episode number down
            'cover': cover
        }


class AnimeList:
    """Index of every series listed on the site's anime-list page."""

    def __init__(self):
        # Every series lives in this list as an <Element a ...> node:
        # .text holds the name, .attrib['href'] the preview-page link.
        self.anime_list_HTML = etree.HTML(r.get('https://www.animefreak.tv/home/anime-list').text)
        self.anime_list = self.anime_list_HTML.xpath('/html/body/div[2]/div[3]/div/div[1]/div/ul/li/a')

    def get_anime_info_by_index(self, index):
        """
        Fetch the full info for one series chosen by list index.
        :param index: position in self.anime_list
        :return: dict with the series' name, link, cover, genres, status,
            first-aired date and description (see View.get_anime_info)
        """
        href = self.anime_list[index].attrib['href']
        return View(href).get_anime_info()

    def get_random_anime_info(self):
        """Pick one series uniformly at random and return its info dict."""
        # randrange(n) is exactly equivalent to randint(0, n - 1).
        return self.get_anime_info_by_index(random.randrange(len(self.anime_list)))


# Module-level singleton shared by HomePage.
# NOTE(review): constructing AnimeList issues an HTTP GET, so merely
# importing this module performs network I/O — consider lazy init.
anime_list = AnimeList()

if __name__ == '__main__':
    # Manual smoke test: download one episode with 256 worker threads.
    # home_page = HomePage()
    # view = View()
    Downloader("https://www.animefreak.tv/watch/rezero-kara-hajimeru-isekai-seikatsu-2nd-season-part-2/episode/episode-6", 256, "Re:Zero kara Hajimeru Isekai Seikatsu 2nd Season Part 2_Episode 6.mp4").start()
# /html/body/div[2]/div[3]/div/div[1]/div/ul/li/a
# while 1:
    # try:
        # exec(input('> > > '))
    # except SystemExit:
        # print('SystemExit')
    # except KeyboardInterrupt:
        # print('KeyboardInterrupt')
    # except Exception as err:
       # print(err)
