#coding=utf8
import json
import os.path
import re
import sys
import urllib.request
import bs4
import codecs
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())  # force UTF-8 on stdout so printing non-ASCII (Chinese) text doesn't raise UnicodeEncodeError: 'ascii' codec can't encode characters
# 0.请求页面
# 0. Fetch a page and cache it to disk
def get_html(url, filename):
    """Download *url* and cache its HTML as /www/wwwroot/default/temp/<filename>.text.

    The file is opened in 'w' mode, so any stale cached copy is truncated
    before the fresh page is written. Returns *filename* so callers can
    locate the cache entry.
    """
    res_html = urllib.request.urlopen(
        url=urllib.request.Request(url=url), timeout=10
    ).read().decode('utf-8')
    with open('/www/wwwroot/default/temp/' + filename + '.text', 'w',
              encoding='utf-8') as v_html:
        # NOTE: the original also called v_html.close() inside the `with`
        # block — redundant, the context manager already closes the file.
        v_html.write(res_html)
    return filename

# 1.处理html
# 1. Narrow the raw HTML down to the article container
def htmlmanager(html):
    """Parse *html* and return the <div class="article_container row box"> tag.

    Returns None when the page has no such container (BeautifulSoup.find
    semantics).
    """
    soup = bs4.BeautifulSoup(html, 'html.parser')
    return soup.find('div', class_='article_container row box')


# 2.处理数据
# 2. Extract movie data (title, poster, synopsis, download + play lists)
def datamanager(soup):
    """Pull the movie dataset out of the article <div> returned by htmlmanager.

    Returns a dict with keys:
      title       -- article <h1> text
      img         -- poster image URL (first <img> in #post_content)
      content     -- synopsis paragraphs, whitespace collapsed
      downloasurl -- download links (key typo kept for backward compatibility)
      playurl     -- streaming playlists, one entry per widget box
    """
    title = soup.find('h1').text  # article headline
    target1 = soup.find('div', id='post_content')
    img = target1.find('p').find('img')['src']  # poster image URL

    # Synopsis: keep only the middle paragraphs — skip the first (poster)
    # and the trailing three (download boilerplate) — and strip every run
    # of whitespace out of each paragraph.
    paragraphs = target1.findAll('p')
    contentarray = [
        {'text': ''.join(p.text.split())}
        for i, p in enumerate(paragraphs)
        if 0 < i <= len(paragraphs) - 4
    ]
    # (The original round-tripped this list through json.dumps/json.loads
    # and ran a single-quote .replace() on double-quoted JSON — both no-ops,
    # removed.)

    # Download links: one entry per table row, first <a> of each.
    downloadurlarray = [
        {'downloadtext': tr.find('a').text, 'downloadurl': tr.find('a')['href']}
        for tr in target1.find('table').find('tbody').findAll('tr')
    ]

    # Play lists: each "widget box row" <div> containing anchors becomes one
    # playlist, with its anchors collected as item/url pairs.
    playurllist = []
    for widget in soup.findAll('div', class_='widget box row'):
        if widget.find('a'):
            items = [{'item': a['title'], 'url': a['href']}
                     for a in widget.findAll('a')]
            # Append the playlist ONCE, after gathering its items. The
            # original appended inside the anchor loop, so each playlist
            # appeared len(items) times in the output.
            playurllist.append({
                'playlist': widget.find('h3').text,
                'itemurl': items,
            })

    return {
        'title': title,
        'img': img,
        'content': contentarray,
        'downloasurl': downloadurlarray,  # sic — existing consumers use this key
        'playurl': playurllist,
    }


# 3.封装数据
#
#
#
# main函数
# main: fetch (or reuse cached) movie page, parse it, print and return the data
def main(movieId):
    """Resolve a movie id like '/dongzuopian/22776' to its parsed dataset.

    The category prefix is stripped to derive the cache filename; the page
    is downloaded only when no cached copy exists under
    /www/wwwroot/default/temp/. Prints the dataset as JSON and returns it.
    """
    filename = re.sub('(/.*?/)', '', movieId)  # drop the '/category/' prefix
    path = '/www/wwwroot/default/temp/' + filename + '.text'

    if not os.path.exists(path):
        # Cache miss: download the page and write it to disk first.
        url = f'https://www.66s6.net{movieId}.html'
        get_html(url, filename)

    # Single read path replaces the original's two duplicated branches
    # (which each checked os.path.exists and re-read the file).
    with open(path, 'r', encoding='utf-8') as html:
        soup = htmlmanager(html.read())  # narrow to the article container
        data = datamanager(soup)         # extract the dataset

    print(json.dumps(data, ensure_ascii=False))
    return data


# TV-show pages share the exact same markup as movie pages, so parsing is
# identical. The original duplicated datamanager's ~60 lines verbatim
# (including its playlist duplicate-append bug); delegate instead.
def TVdatamanager(soup):
    """Extract the TV-show dataset from the article <div>; same layout as movies."""
    return datamanager(soup)


# TVmain: like main(), but for TV-show ids with two path segments
def TVmain(movieId):
    """Resolve a TV id like '/dianshiju/rihanju/22940' to its parsed dataset.

    TV ids carry two category segments before the numeric id, so the cache
    filename is derived with a two-segment prefix strip. Prints the dataset
    as JSON and returns it.
    """
    filename = re.sub('(/.*?/.*?/)', '', movieId)  # drop '/a/b/' prefix
    path = '/www/wwwroot/default/temp/' + filename + '.text'

    if not os.path.exists(path):
        url = f'https://www.66s6.net{movieId}.html'
        get_html(url, filename)

    # Always read with an explicit encoding — the original's download branch
    # opened the file without one, relying on the platform default.
    with open(path, 'r', encoding='utf-8') as html:
        soup = htmlmanager(html.read())
        data = TVdatamanager(soup)

    print(json.dumps(data, ensure_ascii=False))
    return data


if __name__ == '__main__':
    # The movie id arrives as a URL path, e.g. "/dongzuopian/22776".
    # TV-show ids live under "/dianshiju/" and use the TV parser.
    movieId = sys.argv[1]
    if "/dianshiju/" in movieId:
        TVmain(movieId)
    else:
        main(movieId)