'''
1. Crawl all page information through the UI (browser automation).
2. Persist the scraped data to files.
3. Support interruption: on resume, reload the saved file and continue crawling only new data.
4. Support updating: diff the local file against the live site, apply the updates and emit a new diff file.
'''
import time
from setting import elements, setting
from base.base_ui_op import open_page
from util.write_data import write_data
from util.read_data import read_data
from spider.spider_file_tree import analysis_json, key_to_value1
import copy


# Match scraped data against a JSON template file.
def json_template(data, template):
    """Match *data* against a JSON *template*.

    NOTE(review): stub — not implemented yet; currently returns None.
    """
    pass


# Collect the category links from the home page's category bar.
def get_category_url(driver):
    """Scrape the home-page category bar into a nested dict.

    Returns a mapping ``{head: {label: {'href': url}}}`` where *head* is the
    text before the full-width colon '：' and each *label* is one of the
    '丨'-separated entries after it.  The last row has no head of its own and
    is folded into the most recently seen head.
    """
    result = {}
    rows = driver.find_elements_by_xpath(elements.category_element)
    last_row = rows[-1] if rows else None
    for row in rows:
        anchors = row.find_elements_by_xpath('a')
        parts = row.text.split('：')
        if row == last_row:
            # Final row: whole text is the label list; reuse the previous head.
            labels = parts[0].split('丨')
            head = list(result.keys())[-1]
        else:
            head = parts[0]
            labels = parts[1].split('丨')
        # When counts disagree, the first anchor belongs to the head itself.
        if len(labels) != len(anchors):
            anchors = anchors[1:]
        bucket = result.setdefault(head, {})
        for pos, label in enumerate(labels):
            bucket[label] = {
                'href': anchors[pos].get_attribute('href')
            }
    return result


# Collect every detail-page URL from each category's paginated list pages.
def get_category_list_url(driver, json_data):
    """Walk every category / sub-category list page and gather item links.

    For each sub-category, visits its 'href', reads the total page count from
    the pager ("x/y" — the tail is the count), then iterates the pages
    collecting ``{'name': title, 'adress': url}`` records.  The collected list
    is stored under ``json_data[category][cl]['detail_url_list']`` and the
    whole structure is written to '53info.json'.

    Fixes vs. the original: dropped the redundant ``copy.deepcopy`` of a dict
    that is freshly built on every iteration, and the result list is attached
    once per sub-category instead of being re-assigned on every page.

    NOTE(review): when the next-page click fails, we ``continue`` without
    having advanced, so the same page is scraped again — confirm whether a
    retry/abort is wanted here.
    """
    for category in json_data.keys():
        for cl in json_data[category].keys():
            driver.get(json_data[category][cl]['href'])
            time.sleep(1)  # let the list page render before reading the pager
            page_count = int(driver.find_element_by_xpath(elements.count_category_page).text.split('/')[-1])
            print('{} 共计 {} 页'.format(cl, page_count))
            detail_url_list = []
            for page in range(page_count):
                li_element_list = driver.find_elements_by_xpath(elements.category_li_element_list)
                print('>>获取 第{}页 详情 '.format(page + 1))
                for index, ele in enumerate(li_element_list):
                    print('>>>>获取 {} 详情页信息'.format(index + 1), ele.get_attribute('title'), ele.get_attribute('href'))
                    # A new dict per item — no deepcopy needed before appending.
                    detail_url_list.append({'name': ele.get_attribute('title'), 'adress': ele.get_attribute('href')})
                if page == page_count - 1:
                    print('last_page {}'.format(page))
                else:
                    try:
                        driver.find_element_by_xpath(elements.next_page).click()
                    except Exception as e:
                        print('点击报错：错误信息{}\n category--{}\ncl--{}\nurl--{}\n'.format(e, category, cl,
                                                                                     json_data[category][cl][
                                                                                         'href']))
                        continue
            # Attach once, after all pages of this sub-category are collected.
            json_data[category][cl]['detail_url_list'] = detail_url_list
    write_data('53info', json_data, 'json')
    return json_data


# Scrape the attribute list of every detail page and merge it into the data.
def get_page_detail(driver):
    """Visit each detail URL from '53info.json', scrape its key:value rows
    and write the enriched structure to '53_detail_info.json'.

    Each <li> row looks like "key：value"; rows containing '课件星级' are
    skipped.  ``key_to_value1(url)`` maps a URL back to its
    (category, sub-category, list-index) position inside the data dict.

    Fix vs. the original: split on the FIRST full-width colon only
    (``split('：', 1)``), so values that themselves contain '：' are no
    longer truncated, and rows without a colon are skipped instead of
    raising IndexError.
    """
    data = read_data('53info.json')
    # JSONPath: every category -> sub-category -> detail_url_list entry's 'adress'
    exp = '$.[*].[*].detail_url_list[*].adress'
    detail_url_list = analysis_json(data, exp)
    print(len(detail_url_list))
    for num, url in enumerate(detail_url_list):
        c, cl, index = key_to_value1(url)
        print('>>>>>>>>>>>>>>>>正在获取第{}条>>>>>>>>>>>>>>>>信息--{}--{}--{}'.format(num, c, cl, index))
        driver.get(url)
        li_list = driver.find_elements_by_xpath(elements.detail_li_text)
        print(c, cl, index)
        for i in li_list:
            if '课件星级' in i.text:
                continue  # star-rating rows carry no key:value payload
            p_var = i.text.split('：', 1)
            if len(p_var) == 2:  # skip malformed rows instead of crashing
                data[c][cl]['detail_url_list'][index][p_var[0]] = p_var[1]
        print(data[c][cl]['detail_url_list'][index])
    write_data('53_detail_info', data, file_type='json')


def video_size():
    data = read_data('53_detail_info.json')
    count_size = []
    for i in data.keys():
        for j in data[i].keys():
            total = len(data[i][j]['detail_url_list'])
            video_dict = {'MB': 0.0, 'GB': 0.0}
            count_num = 0
            error_count = 0
            for info in data[i][j]['detail_url_list']:
                try:
                    if 'MB' in info['视频大小']:
                        count_mb = info['视频大小'].split('MB')[0]
                        if count_mb:
                            if ',' in count_mb:
                                count_mb = count_mb.replace(',', '.')
                            elif '。' in count_mb:
                                count_mb = count_mb.replace('。', '.')
                            video_dict['MB'] += float(count_mb)
                        else:
                            video_dict['MB'] += 0.0
                    elif 'GB' in info['视频大小']:
                        count_gb = info['视频大小'].split('GB')[0]
                        if count_gb:
                            if ',' in count_gb:
                                count_gb = count_gb.replace(',', '.')
                            elif '。' in count_gb:
                                count_gb = count_gb.replace('。', '.')
                            video_dict['GB'] += float(count_gb)
                        else:
                            video_dict['GB'] += 0.0
                    count_num += 1
                except Exception as e:
                    error_count += 1
                    print('错误数据>>>>>>>>>>>>info  {}\n error****{}'.format(e, info))
            count_size.append(video_dict)
            print('debug {}合计 总数:{},已计算video数量{},错误数据：{}\ncount_info {}'.format(j, total, count_num, error_count, video_dict))
    total_mb_count = sum([i['MB'] for i in count_size])
    total_gb_count = sum([i['GB'] for i in count_size])
    print('总共需要空间大小:{:.4f}GB'.format(total_gb_count+total_mb_count/1024))
    print('总共需要空间大小:{:.4f}TB'.format((total_gb_count+total_mb_count/1024)/1024))


# Entry point of the crawl pipeline.
def page_info_main():
    """Run the full crawl: open the site, discover the category links,
    collect every list page's detail URLs (persisted to file), then scrape
    each detail page and persist the enriched data."""
    browser = open_page(setting.get_conf('URL', 'base_url'))
    category_map = get_category_url(browser)
    # Gather detail-page URLs for every category and write them to file.
    get_category_list_url(browser, category_map)
    # Re-read the file, scrape each detail page, and rebuild the JSON.
    get_page_detail(browser)


if __name__ == '__main__':
    # Stand-alone run: only tally the disk space of already-scraped data.
    # (Run page_info_main() instead to perform a full crawl.)
    video_size()
