from tornado.web import RequestHandler
import tornado
from urllib import parse
from ORM.elasticsearch_api import elasticsearch_data
import time
from analysis import go_torrents
import os
class SearchHandler(RequestHandler):
    """Search results page.

    GET parses the query and page number out of the (URL-encoded) path;
    POST reads the query from the form body. Both then run the same
    Elasticsearch lookup and render either the results page or the
    "no results" fallback.
    """

    @tornado.gen.coroutine
    def get(self, *args, **kwargs):
        """Handle direct URL access: /search/<quoted name>?page=<n> style path."""
        start_time = time.time()
        # args[0] looks like "name=<query>&<page>"; split out both parts.
        # NOTE(review): an unexpected path shape raises IndexError here (500),
        # same as the original behavior — only the ES lookup below is guarded.
        moviename = args[0].split("=")[1].split("&")[0]  # search text
        page_num = args[0].split("=")[1].split("&")[1]   # requested page
        self._search_and_render(moviename, page_num, start_time)

    @tornado.gen.coroutine
    def post(self, *args, **kwargs):
        """Handle the search form submission."""
        start_time = time.time()
        moviename = self.get_body_argument("moviename")
        # Path segment carries "<quoted name>&<page>"; decode to get the page.
        name_path = self.request.path.split("/")[2]
        page_num = int(parse.unquote(name_path).split("&")[1])  # target page
        self._search_and_render(moviename, page_num, start_time)

    def _search_and_render(self, moviename, page_num, start_time):
        """Query ES for `moviename` and render results or the empty page.

        Shared by get() and post(); `start_time` is used to report the
        elapsed search time on the results page.
        """
        name_path = self.request.path.split("/")[2]  # encoded query + "&page"
        join_path = name_path.split("&")[0]          # encoded query alone
        movieapi = elasticsearch_data()
        try:
            movies = movieapi.get_datas_by_query(
                "dht5", "dht_type5", moviename)["hits"]["hits"]
            num = len(movies)
            if num > 0:
                # ceil(num / 20): 20 results per page. Matches the original
                # three-way branch (<=20 -> 1; exact multiple -> num//20;
                # otherwise num//20 + 1).
                pages_num = -(-num // 20)
                movie_time = round(time.time() - start_time, 5)
                self.render("search.html", movie_time=movie_time, num=num,
                            moviename=moviename, movies=movies,
                            pages_num=pages_num, page_num=page_num,
                            name_path=name_path, join_path=join_path)
            else:
                self._render_empty(moviename)
        except IndexError:
            # Malformed ES response / missing fields: show the fallback page.
            self._render_empty(moviename)

    def _render_empty(self, moviename):
        """Render the "no results" page populated with ten static entries."""
        file_name, file_encode, movie_name = file_static()
        self.render("search_null.html", movie_name=movie_name,
                    file_name=file_name, file_encode=file_encode,
                    moviename=moviename)

class DetailHandler(RequestHandler):
    """Detail page for a single torrent, reached by direct URL access."""

    @tornado.gen.coroutine
    def get(self, *args, **kwargs):
        started = time.time()
        moviename = args[0].split("/")[1]
        api = elasticsearch_data()
        # First matching hit's document; IndexError propagates if none found.
        movies = api.get_datas_by_query(
            "dht5", "dht_type5", moviename)["hits"]["hits"][0]["_source"]
        # Build the .torrent from the stored name + raw torrent payload.
        file_seed = go_torrents(movies["files_name"], movies["file_data"])
        # Display name is capped at 20 chars; slicing a shorter string
        # returns it unchanged, so no length check is needed.
        file_names = movies["files_name"][:20] + ".torrent"
        movie_time = round(time.time() - started, 5)
        self.render("particulars.html", moviename=moviename,
                    movie_time=movie_time, movies=movies,
                    file_names=file_names)



class Search_nullHandler(RequestHandler):
    """Standalone "no results" page, filled with ten static entries."""

    @tornado.gen.coroutine
    def get(self, *args, **kwargs):
        file_name, file_encode, movie_name = file_static()
        self.render("search_null.html", movie_name=movie_name,
                    file_name=file_name, file_encode=file_encode,
                    moviename=" ")

class MainHandler(RequestHandler):
    """Serves cached .torrent files from <project root>/file_cache as downloads."""

    @tornado.gen.coroutine
    def get(self, *args, **kwargs):
        try:
            # basename() strips any directory components from the URL-supplied
            # name, blocking path traversal (e.g. "../../etc/passwd").
            filename = os.path.basename(args[0])
            # Octet-stream + attachment makes the browser download the file.
            self.set_header('Content-Type', 'application/octet-stream')
            self.set_header('Content-Disposition',
                            'attachment; filename=%s' % filename)
            # Resolve the cache directory absolutely instead of os.chdir():
            # chdir mutates process-global state and races concurrent requests.
            cache_dir = os.path.join(
                os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
                "file_cache")
            with open(os.path.join(cache_dir, filename), 'rb') as f:
                while True:
                    data = f.read(65536)  # stream in chunks, not whole-file
                    if not data:
                        break
                    self.write(data)
            # finish() must come AFTER the loop. The original called it inside
            # the loop, which only worked because f.read() slurped the whole
            # file in one chunk; any second write after finish() would raise.
            self.finish()
        except Exception:
            # Best-effort download: a missing file or a client that aborts
            # mid-transfer just ends the response (original swallowed these
            # via a bare except; narrowed so SystemExit etc. still propagate).
            pass

def file_static():
    """Fetch ten fixed entries used to populate the "no results" page.

    Returns:
        tuple[list, list, list]: three parallel lists —
            file_name:   display names truncated to 20 characters,
            file_encode: URL-encoded search links ("?name=<name>" + "&1",
                         i.e. page 1 of a search for that name),
            movie_name:  the full, untruncated names.
    """
    movieapi = elasticsearch_data()
    files = movieapi.query_all_data("dht5", "dht_type5")["hits"]["hits"][:10]
    file_name = []
    movie_name = []
    file_encode = []
    for hit in files:
        name = hit["_source"]["files_name"]
        file_encode.append(parse.quote("?name=" + name) + "&" + "1")
        # Slicing a shorter string returns it whole, so the original
        # if/else length check is unnecessary.
        file_name.append(name[:20])
        movie_name.append(name)
    return file_name, file_encode, movie_name
