import json

from Utility_Http import Http
from Utilty_Tools import is_number
from pyquery import PyQuery as PythonQuery
from JCI_Config import base_url, list_api_url
import re
import time


def get_contents_data(url):
    """Fetch a news detail page and extract its body, thumbnail and attachments.

    Args:
        url: Absolute URL of the news detail page.

    Returns:
        A list ``[thumb, contents, news_attaches]``: ``thumb`` is the first
        image URL found in the article body ('' when absent), ``contents``
        is the article HTML with single quotes swapped for double quotes,
        and ``news_attaches`` is a JSON string of attachment dicts ('' when
        there are no attachments).
    """
    http_obj = Http(url)
    html_doc = PythonQuery(http_obj.get().text)
    html_doc.make_links_absolute(base_url)
    # .html() returns None when the selector matches nothing; guard so a
    # page without a .v_news_content container doesn't raise AttributeError.
    raw_html = html_doc('.v_news_content').html()
    contents = (raw_html or '').replace('\'', '"')
    news_attach_list = html_doc('#web2l>form>div>ul>li')
    news_attaches = ''
    # First image inside the body doubles as the thumbnail, when present.
    tmp_src = html_doc('.v_news_content img').attr.src
    thumb = tmp_src if tmp_src is not None else ''
    news_attaches_list = [
        {
            'attach_url': attach_item.find('a').attr.href,
            # Item text looks like "<filename>已下载<n>次"; keep the filename.
            'attach_filename': attach_item.text().split('已下载')[0]
        }
        for attach_item in news_attach_list.items()
    ]
    if news_attaches_list:
        news_attaches = json.dumps(news_attaches_list, ensure_ascii=False)
    return [thumb, contents, news_attaches]


def get_contents_list_data(column_data):
    """Crawl every listing page of a column and collect its news entries.

    Args:
        column_data: Row tuple shaped like
            (id, domain, column_name, list_url, home_flag, news_count, pages),
            e.g. (1, 'www', '校园采撷', 'https://www.jci.edu.cn/index/xycx.htm',
            0, 11534, 769).

    Returns:
        A list of rows
        ``[abs_url, column_name, news_col, news_id, title, column_data[4],
        timestamp, is_home]`` — one per news link found across all pages.
    """
    pages = column_data[6]
    list_index_url = column_data[3]
    # Strip the trailing '.htm' so paginated URLs become '<base>/<n>.htm'.
    tmp_url = list_index_url[0:-4] + '/'
    news_data = []
    # Walk from the highest page number down to 1; the highest-numbered
    # page is served by the column's index URL itself.
    for p in range(pages, 0, -1):
        if p == pages:
            pages_url = list_index_url
        else:
            pages_url = tmp_url + str(p) + '.htm'
        http_obj = Http(pages_url)
        html_doc = PythonQuery(http_obj.get().text)
        html_doc.make_links_absolute(base_url)
        html_data = html_doc('.list-con ul li a')
        for news_item in html_data.items():
            abs_url = news_item.attr.href
            news_links = abs_url.split('/')
            news_col = 0
            news_id = 0
            is_home = 1
            # Links pointing at the main site (www.jci.edu.cn) get is_home=0.
            if abs_url.find('//www.jci.edu.cn/') != -1:
                is_home = 0
            # Main-site detail URLs split into exactly 6 '/'-parts, with the
            # column id at index 4 and '<news_id>.htm' at index 5.
            if len(news_links) == 6 and is_home == 0:
                news_col = news_links[4]
                news_id = news_links[5].split('.')[0]
                if not is_number(news_col):
                    news_col = 0
                if not is_number(news_id):
                    news_id = 0
            # Link text begins with a 'YYYY-MM-DD' date; convert it to a
            # Unix timestamp for storage.
            tmp_news_data = [
                abs_url, column_data[2], news_col, news_id, news_item.attr.title, column_data[4],
                int(time.mktime(time.strptime(str(news_item.text())[:10], "%Y-%m-%d"))), is_home
            ]
            news_data.append(tmp_news_data)
        print('已采集【%d】页数据，还有【%d】页数据' % ((pages - p), p))
    return news_data


def get_column_list_data(column_id, column_name, column_url):
    """Scrape one column listing page into a list of news-entry dicts.

    Args:
        column_id: Numeric id stored with each entry under 'cid'.
        column_name: Column title stored with each entry.
        column_url: URL of the column listing page to fetch.

    Returns:
        A list of dicts with keys absolute_url, news_col, news_id, cid,
        column_name, title, is_home and timestamp — one per news link.
    """
    page = PythonQuery(Http(column_url).get().text)
    page.make_links_absolute(base_url)
    news_data = []
    for anchor in page('.list-con ul li a').items():
        link = anchor.attr.href
        # Links on the main site (www.jci.edu.cn) are flagged with 0.
        is_home = 0 if '//www.jci.edu.cn/' in link else 1
        col_part = 0
        id_part = 0
        parts = link.split('/')
        # Main-site detail URLs split into exactly 6 '/'-parts:
        # column id at index 4, '<news_id>.htm' at index 5.
        if is_home == 0 and len(parts) == 6:
            col_part = parts[4]
            id_part = parts[5].split('.')[0]
            if not is_number(col_part):
                col_part = 0
            if not is_number(id_part):
                id_part = 0
        # Link text begins with a 'YYYY-MM-DD' date; convert to epoch seconds.
        posted = int(time.mktime(time.strptime(str(anchor.text())[:10], "%Y-%m-%d")))
        news_data.append({
            'absolute_url': link,
            'news_col': col_part,
            'news_id': id_part,
            'cid': column_id,
            'column_name': column_name,
            'title': anchor.attr.title,
            'is_home': is_home,
            'timestamp': posted
        })
    return news_data


def get_column_data(domain='www', data_type='json'):
    """Fetch per-column item/page counts from each configured listing page.

    Args:
        domain: Site subdomain tag stored with each record.
        data_type: 'json' → list of dicts; any other value → list of tuples
            (domain, column_name, column_url, column_id, news_nums, news_pages).

    Returns:
        One config entry per column defined in ``list_api_url``.
    """
    news_cfg_data = []
    # Iterate over the configured columns instead of a hard-coded range(0, 10),
    # so the function stays correct when columns are added or removed.
    for column_id, column_src in enumerate(list_api_url):
        http_obj = Http(column_src.get('url'))
        html_doc = PythonQuery(http_obj.get().text)
        html_doc.make_links_absolute(base_url)
        html_data = html_doc('span.p_t')
        # The pager text mixes Chinese labels with two numbers (total items,
        # total pages); strip CJK characters, then split on spaces.
        column_cfg = re.sub(r'[\u4e00-\u9fa5]', '', html_data.text()).split(" ")
        if data_type == 'json':
            column_cfg_data = {
                'domain': domain,
                'column_name': column_src.get('title'),
                'column_url': column_src.get('url'),
                'column_id': column_id,
                'news_nums': int(column_cfg[0]),
                'news_pages': int(column_cfg[1])
            }
        else:
            column_cfg_data = (
                domain, column_src.get('title'), column_src.get('url'), column_id,
                int(column_cfg[0]), int(column_cfg[1])
            )
        news_cfg_data.append(column_cfg_data)
    return news_cfg_data
