import re
import os
import requests
from bs4 import BeautifulSoup

import tools.tools

# path = lambda p: os.path.abspath(
#     os.path.join(os.path.dirname(__file__), p)
# )

# Directory containing this module; data files live under a sibling "data" dir
# (paths are built later via dir_path.replace('function', 'data\\...')).
dir_path = os.path.dirname(__file__)

# Shared helper-tool instance (yaml I/O, dedup, progress bar) used module-wide.
tool = tools.tools.Tools()


def path(p):
    """Return the absolute path of *p*, resolved relative to this file's directory."""
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, p))


def set_ml_to_yaml(pages=2, per_page=20):
    """Build data/movielink.yaml from the raw links in data/movielink.txt.

    Each movie link is expanded into *pages* paginated comment URLs
    (``?start=0``, ``?start=per_page``, ...) and grouped under keys
    ``page_one``, ``page_two``, ... in the dumped YAML list.

    :param pages: number of comment pages to generate per movie link
        (default 2, the original hard-coded value)
    :param per_page: comments per page, i.e. the ``?start=`` step
        (default 20, Douban's page size)
    """
    # NOTE(review): the backslash in the data path makes this Windows-only —
    # confirm before running elsewhere.
    with open(dir_path.replace('function', 'data\\%s' % 'movielink.txt')) as txt_file:
        lines = txt_file.readlines()

    ml_list = []
    for i in range(pages):
        sub_movie_list = []
        # set_header appends the paginated URLs into sub_movie_list in place.
        set_header(lines, sub_movie_list, i * per_page)
        ml_list.append({'page_%s' % tool.num_to_eng(i + 1): sub_movie_list})

    tool.yaml_dump(dir_path.replace('function', 'data\\%s' % 'movielink.yaml'), ml_list)


def set_header(stream, ml_list, header):
    """Append every line of *stream* to *ml_list* with a ``?start=<header>``
    query suffix, stripping the trailing newline first.

    Mutates *ml_list* in place and returns None.
    """
    suffix = '?start=%s' % str(header)
    ml_list.extend(line.strip('\n') + suffix for line in stream)


def get_ml():
    """Load the paginated movie links from data/movielink.yaml.

    Run set_ml_to_yaml() first if the YAML file needs regenerating.
    """
    yaml_path = dir_path.replace('function', 'data\\%s' % 'movielink.yaml')
    return tool.yaml_load(yaml_path)


def get_user_link():
    """Scrape reviewer links (duplicates included) from the first pages of the
    Top250 movie comment pages and dump them to data/userdict.yaml.

    Each dumped entry maps a cleaned user name to the user's profile URL,
    rewritten onto the ``movie.douban.com`` subdomain. Users displayed as
    "[已注销]" (deactivated accounts) are skipped.
    """
    accepted = 0  # running count of kept users, echoed as crude progress
    ml_list = get_ml()
    user_list = []

    for index, page in enumerate(ml_list):
        # Each element of ml_list is {'page_one': [...]}, {'page_two': [...]}, ...
        sub_ml_list = page.get('page_%s' % tool.num_to_eng(index + 1))
        for ml in sub_ml_list:
            request = requests.get(ml)
            if request.status_code != requests.codes.ok:
                continue  # skip pages that failed to load; best-effort scrape
            soup = BeautifulSoup(request.text, 'html.parser')
            for link in soup.find_all('a', 'author'):
                name = tool.replace(link.get_text())
                if u"[已注销]" in name:
                    continue  # deactivated account — no usable profile
                print(accepted)
                accepted += 1
                # Point the profile link at the movie subdomain so later
                # review-count scraping hits movie.douban.com directly.
                user_list.append({name: link.get('href').replace(
                    'https://www.douban.com/people/',
                    'https://movie.douban.com/people/')})

    tool.yaml_dump(dir_path.replace('function', 'data\\%s' % 'userdict.yaml'), user_list)


def user_duplicate_removal():
    """Deduplicate the scraped user entries.

    Reads data/userdict.yaml (run get_user_link() first if it is stale),
    removes duplicates via the shared tool helper, and writes the result
    to data/userdict_nodp.yaml.
    """
    src = dir_path.replace('function', 'data\\%s' % 'userdict.yaml')
    dst = dir_path.replace('function', 'data\\%s' % 'userdict_nodp.yaml')
    deduped = tool.duplicate_removal(tool.yaml_load(src))
    tool.yaml_dump(dst, deduped)


def user_more_than(num, c, _position, stream_in, stream_out):
    """Keep only users with at least *num* reviews.

    Visits each user's profile page, reads the review count shown on the
    "reviews" link, and dumps the qualifying entries to *stream_out*.

    :param num: minimum review count a user must have to be kept
    :param c: curses object used by the progress bar
    :param _position: row of the progress bar in the console
    :param stream_in: YAML path of the deduplicated user list
        (run user_duplicate_removal() first if it is stale)
    :param stream_out: YAML path the filtered user list is dumped to
    """
    user_list_ndp = tool.yaml_load(stream_in)

    # Matches the "reviews" link on a user's profile page. Raw strings avoid
    # invalid-escape warnings for \s, \d and \? on modern Python.
    pattern = re.compile(r'https://movie.douban.com/people/([\s\S]*)/reviews\?cat_id=1002', re.S)
    # Leading digits of the link text; compiled once instead of per iteration.
    # NOTE(review): assumes the link text starts with digits — int('') would
    # raise ValueError otherwise. Confirm against the live page markup.
    count_pattern = re.compile(r'(\d*)', re.S)
    length = len(user_list_ndp)
    user_list_mt = []

    for index, user_dict in enumerate(user_list_ndp):
        key = list(user_dict)[0]  # each dict holds a single name -> URL pair
        request = requests.get(user_dict[key])
        if request.status_code == requests.codes.ok:
            soup = BeautifulSoup(request.text, 'html.parser')
            for content in soup.find_all(href=pattern):
                count = int(re.search(count_pattern, content.get_text()).group(0))
                if count >= num:
                    user_list_mt.append(user_dict)
        tool.thread_bar(index, length, c, _position, stream_out)

    tool.yaml_dump(stream_out, user_list_mt)


if __name__ == "__main__":
    # Entry point intentionally empty; invoke the pipeline steps above
    # (set_ml_to_yaml / get_user_link / user_duplicate_removal / user_more_than)
    # manually or from another module.
    pass
