import json
from urllib.parse import urlencode
import pymongo
import requests
from bs4 import BeautifulSoup
from requests.exceptions import ConnectionError
from config import *


# Shared MongoDB handle. connect=False defers the real connection until the
# first operation, which keeps the client fork-safe under multiprocessing.
client = pymongo.MongoClient(host='127.0.0.1', connect=False)
db = client[MONGO_DB]

def get_page_index_temp(date, site):
    """Fetch the roll-news listing for *site* on *date* (no page number).

    Used to probe the listing before paging through it. Returns the full
    ``requests.Response`` on HTTP 200, otherwise ``None``. Connection
    failures are reported and also yield ``None``.
    """
    query = {
        # A 'cate' filter (e.g. 'newsgn,newsgj,newssh,milite,...') is
        # deliberately left unset so every category is returned.
        'site': site,
        'date': date,
        'mode': '1',
        'of': 'json',
    }
    url = 'http://roll.news.qq.com/interface/roll.php' + '?' + urlencode(query)

    try:
        resp = requests.get(url, headers=HEADERS)
    except ConnectionError:
        print('Error occurred')
        return None
    return resp if resp.status_code == 200 else None

def get_page_index(date, site, pageNum):
    """Fetch page *pageNum* of the roll-news listing for *site* on *date*.

    Returns the response body text on HTTP 200, otherwise ``None``.
    Connection failures are reported and also yield ``None``.
    """
    query = {
        # A 'cate' filter (e.g. 'newsgn,newsgj,newssh,milite,...') is
        # deliberately left unset so every category is returned.
        'site': site,
        'date': date,
        'page': pageNum,
        'mode': '1',
        'of': 'json',
    }
    url = 'http://roll.news.qq.com/interface/roll.php' + '?' + urlencode(query)
    try:
        print('正在爬取的网页为：', url)
        resp = requests.get(url, headers=HEADERS)
    except ConnectionError:
        print('Error occurred')
        return None
    return resp.text if resp.status_code == 200 else None


def parse_maxNum(response):
    """Extract the total page count from a roll-news listing response.

    *response* is an object with a ``.text`` attribute holding the JSON
    body (e.g. a ``requests.Response``). Returns ``data.count`` from the
    decoded payload, or ``None`` when the payload decodes to a falsy value.
    """
    payload = json.loads(response.text)
    if not payload:
        return None
    return payload['data']['count']


def parse_page_index(response):
    """Parse a roll-news JSON listing into '<url>[label]' strings.

    *response* is the JSON body text; its ``data.article_info`` field holds
    an HTML fragment of ``<a>`` links plus ``.t-tit`` label spans (the label
    text presumably looks like '[某类]' — the bracket is split on later by
    get_page_detail/parse_page_detail). Returns a list of link+label
    strings, or ``None`` when the payload decodes to a falsy value.
    """
    datas = json.loads(response)
    if not datas:
        return None

    fragment = datas['data']['article_info']
    soup = BeautifulSoup(fragment, 'lxml')
    links = [a['href'] for a in soup.select('a')]
    labels = [tag.get_text() for tag in soup.select('.t-tit')]

    # BUG FIX: the original paired entries with range(min(len(labels),
    # len(labels))) — i.e. len(labels) — which raises IndexError whenever
    # there are fewer links than labels. zip() truncates to the shorter
    # sequence, which is what min(len(links), len(labels)) intended.
    return [link + label for link, label in zip(links, labels)]



def get_page_detail(url):
    """Fetch a single article page.

    *url* is a '<real-url>[label]' string produced by parse_page_index;
    everything from the first '[' on is the label suffix, which is parsed
    later by parse_page_detail and is stripped off before requesting.
    Returns the ``requests.Response`` on HTTP 200, otherwise ``None``.
    """
    # Keep only the real URL. The original also bound the label here but
    # never used it, and crashed with IndexError when '[' was absent;
    # split()[0] is safe for both shapes of input.
    url = url.split('[')[0]
    try:
        # Send the same request headers as the listing fetchers for
        # consistency (the original omitted them only here).
        response = requests.get(url, headers=HEADERS)
        if response.status_code == 200:
            return response
        return None
    except ConnectionError:
        print('Error occurred')
        return None



def parse_page_detail(html, url):
    """Parse one article page into a dict ready for MongoDB.

    *url* is a '<real-url>[label]' string from parse_page_index; *html* is
    the article page body. Returns a dict with keys ``time``, ``origal``
    (the article source — key spelling kept for DB compatibility),
    ``title``, ``label``, ``url`` and ``contents`` (stringified nodes).
    """
    pieces = url.split('[')
    url, label = pieces[0], pieces[1]
    label = label[:-1]  # drop the trailing ']'

    print('url: ', url)

    soup = BeautifulSoup(html, 'lxml')
    # Page titles look like '<headline>_<site section>'; keep the headline.
    title = soup.title.string.split('_')[0]
    print('title:', title)

    source_nodes = soup.select('.a_source > a')
    origal = source_nodes[0].get_text() if source_nodes else ''
    print('origal:', origal)

    time_nodes = soup.select('.a_time')
    if time_nodes:
        time = time_nodes[0].get_text()
    else:
        # Fall back to the date-like segment embedded in the URL path
        # (assumes the path's 5th component is a date — TODO confirm).
        time = url.split('/')[4]
    print('time:', time)

    contents = soup.select('#Cnt-Main-Article-QQ')
    print('contents:', contents)

    return {
        'time': time,
        'origal': origal,
        'title': title,
        'label': label,
        'url': url,
        'contents': str(contents),
    }

def save_to_mongo(result, tableName):
    """Insert *result* into the MongoDB collection *tableName*.

    Returns True on success, False on a database error.
    """
    # BUG FIX: insert_one() returns an always-truthy InsertOneResult and
    # signals failure by raising, so the original `if ...:` test could
    # never take its False branch. Catch the driver error instead so the
    # function's False return path is actually reachable.
    try:
        db[tableName].insert_one(dict(result))
    except pymongo.errors.PyMongoError as err:
        print('Failed to save to Mongo', err)
        return False
    print('Successfully Saved to Mongo', result)
    return True
