import re
import requests
import time
import os
import sys
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
import msvcrt
import subprocess
from bs4 import BeautifulSoup
import lxml



def search(search_name):
    """Search kuaikan66.com for a comic and return all matched results.

    Args:
        search_name: keyword to search for (passed as the ``wd`` query param).

    Returns:
        A list with one tuple per result:
        (name, catalogue-page URL, display-image URL, actor list,
         type/genre list, last-update time, plot summary).
    """
    url = "http://www.kuaikan66.com/search/"

    headers = {
            'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36'
        }
    params = {"wd":search_name}

    # timeout so a dead/slow server cannot hang the caller forever
    html = requests.get(url, headers = headers, params = params, timeout = 10).content.decode()

    # Regexes that extract each piece of information from the result page.
    regexp_playimg     = '<a class="play-img" target=".*?" href=".*?"><img src="(.*?)" alt=".*?" /></a>'       # display image of each result

    regexp_playtxt_div = '<div class="play-txt">(.*?)</div>'        # description block of each result

    # Detail fields inside one description block.
    regexp_name = '<h5><a href=".*?">(.*?)</a></h5>'
    regexp_link = '<h5><a href="(.*?)">.*?</a></h5>'
    regexp_time = '<p class="type"><em>.*?</em>(.*?)</p>'
    regexp_plot = '<p class="plot"><em>.*?</em>(.*?)</p>'

    play_image = re.findall(regexp_playimg, html, re.S)         # display images, parallel to playtxt_div
    playtxt_div = re.findall(regexp_playtxt_div, html, re.S)    # one HTML snippet per result

    comic_actor = []        # main actors, one list per comic
    comic_type = []         # genres, one list per comic
    comic_name = []
    comic_link = []
    comic_time = []
    comic_plot = []

    for div_txt in playtxt_div:
        soup = BeautifulSoup(div_txt, 'lxml')                   # BeautifulSoup for the list-valued fields

        comic_name.append(re.findall(regexp_name, div_txt, re.S)[-1])     # comic name
        comic_link.append(re.findall(regexp_link, div_txt, re.S)[-1])     # catalogue-page link (needs host prefix)
        comic_time.append(re.findall(regexp_time, div_txt, re.S)[-1])     # last update time
        comic_plot.append(re.findall(regexp_plot, div_txt, re.S)[-1])     # plot summary

        # Actors: text of every <a> inside <p class="actor">.
        comic_actor.append([a.get_text()
                            for p in soup.findAll('p', {'class': 'actor'})
                            for a in p.findAll('a')])

        # Genres: text of every <a> inside <p class="type fn-left">.
        comic_type.append([a.get_text()
                           for p in soup.findAll('p', {'class': 'type fn-left'})
                           for a in p.findAll('a')])

    # Combine the parallel lists into one tuple per comic.
    # BUG FIX: the original looked positions up with comic_name.index(name),
    # which always returns the FIRST occurrence — duplicate comic names would
    # all receive the first duplicate's link/image/plot (and it was O(n^2)).
    # zip pairs the lists positionally instead.
    search_info = [
        (name, "http://www.kuaikan66.com/" + link, img, actors, types, ctime, plot)
        for name, link, img, actors, types, ctime, plot
        in zip(comic_name, comic_link, play_image,
               comic_actor, comic_type, comic_time, comic_plot)
    ]

    return search_info
       
        
#==========================================启动搜索任务==========================================#

# 用于GUI端拼接展示信息
def get_detail(result_info):
    """Format one search() result tuple into a display string for the GUI.

    Unpacks the tuple produced by search() and joins the list-valued fields
    into space-separated strings.

    Args:
        result_info: tuple from search():
            (name, link, image, actor list, type list, update time, plot).
            The image field (index 2) is not used here.

    Returns:
        A multi-line, human-readable description string.
    """
    (comic_name, comic_link, _image,
     comic_actor, comic_type, comic_time, comic_plot) = result_info

    # ''.join avoids the quadratic `+=` string building of the original while
    # reproducing its exact output (each item followed by one space, so a
    # non-empty list yields a trailing space and an empty list yields '').
    actors = ''.join(f'{a} ' for a in comic_actor)
    types  = ''.join(f'{t} ' for t in comic_type)

    detail_info = "动漫名称：%s\n主页链接：%s\n主演：%s\n类型：%s\n更新时间：%s\n剧情：%s" % (comic_name, comic_link, actors, types, comic_time, comic_plot)
    return detail_info
    
if __name__ == '__main__':
    # Manual smoke test: search for a title and print the first hit's details.
    results = search("斗罗大陆")
    detail = get_detail(results[0])
    print(detail)
