import requests
from bs4 import BeautifulSoup
import re

# Default HTTP headers for all requests: a desktop-Chrome User-Agent so the
# site serves the regular (non-mobile, non-blocked) page markup.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36',
}

# Matches xinhuanet article URLs of the form
# http://www.xinhuanet.com/.../YYYY-MM/DD/....htm (or YYYY/MM/DD/).
# Raw string + escaped dots so '.' in the domain is literal; compiled once
# instead of re-parsing the pattern for every link.
_ARTICLE_URL_RE = re.compile(
    r'^http://www\.xinhuanet\.com.*[0-9]{4}[-/][0-9]{2}/[0-9]{2}/.*\.htm$')

def init(target_url):
    """Fetch *target_url* and return the article URLs found on the page.

    Downloads the page, collects every <a href>, and keeps only links that
    look like xinhuanet article pages (see _ARTICLE_URL_RE).

    :param target_url: page to scan for article links.
    :return: list of matching href strings (may contain duplicates).
    """
    res = requests.get(target_url, headers=headers)
    # Force UTF-8: the site's pages are UTF-8 but the response headers do
    # not always declare it, so requests may guess wrong.
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, features='html.parser')
    anchors = soup.find_all('a')
    print('发现链接', len(anchors))
    okl = []
    for item in anchors:
        url = item.get('href')
        # Anchors without an href (in-page anchors, JS links) are skipped.
        if url is None:
            continue
        if _ARTICLE_URL_RE.match(url):
            okl.append(url)
    print('初步过滤获得', len(okl))
    return okl

def getContent(target_url):
    """Fetch one xinhuanet article page and extract its metadata.

    :param target_url: URL of the article page.
    :return: dict with keys title / sub_title / category / author / source /
        summary / image_url / content_url / create_time, or None when any
        required field could not be extracted.
    """
    res = requests.get(target_url, headers=headers)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, features='html.parser')
    title = __clean(soup.find_all(name='div', attrs={'class': 'h-title'}))
    # Breadcrumb like "新华网 > 分类 > 正文" -> keep only the category part.
    # Guard against a missing breadcrumb div: __clean returns None then,
    # and the original chained .replace() calls raised AttributeError.
    category = __clean(soup.find_all(name='div', attrs={'class': 'news-position'}))
    if category is not None:
        category = category.replace('新华网', '').replace('正文', '').replace('>', '').strip()
    if not category:
        category = '无'
    # The h-info div holds "<time> 来源：<source>"; partition() tolerates a
    # missing separator (split(...)[1] raised IndexError in that case) —
    # an empty source then fails __check below instead of crashing.
    info = __clean(soup.find_all(name='div', attrs={'class': 'h-info'}))
    if info is None:
        return None
    create_time, _, source = info.partition('来源：')
    create_time = create_time.strip()
    source = source.strip()
    # NOTE(review): takes only the first <p> as the summary — presumably the
    # article lede; confirm this is intended rather than the full body.
    summary = __clean(soup.find_all(name='p'))
    if not __check(title, category, source, summary, create_time):
        return None
    return {
        'title': title,
        'sub_title': '',
        'category': category,
        'author': '新华网',
        'source': source,
        'summary': summary,
        'image_url': '',
        'content_url': target_url,
        'create_time': create_time,
    }

def __clean(target):
    """Return the normalized text of the first tag in *target*.

    Strips CR/LF/TAB and ideographic spaces (U+3000) from the tag's text
    and trims surrounding whitespace. Returns None when *target* (the
    result of a find_all call) is empty.
    """
    if not target:
        return None
    text = target[0].text
    for ch in ('\r', '\n', '\t', '\u3000'):
        text = text.replace(ch, '')
    return text.strip()

def __check(*args):
    """Return True when every extracted field is usable.

    A field is unusable when it is None (element not found) or an empty
    string (element found but blank). The first bad value is printed to
    aid debugging, mirroring the original behavior.

    :param args: extracted field values (strings or None).
    :return: True if all fields are present and non-empty, else False.
    """
    for item in args:
        # `is None` instead of `== None`: identity, not equality, is the
        # correct (and PEP 8 mandated) test for the None singleton.
        if item is None or item == '':
            print(item)
            return False
    return True

def cleanData(data):
    """Deduplicate article dicts by title, keeping first occurrences in order.

    Uses a set for seen titles: the original `title in list` membership test
    made the loop O(n^2) for large inputs.

    :param data: iterable of article dicts, each with a 'title' key.
    :return: new list with later duplicate-title entries removed.
    """
    ret = []
    seen_titles = set()
    for item in data:
        title = item['title']
        if title in seen_titles:
            continue
        seen_titles.add(title)
        ret.append(item)
    return ret
