#!/usr/bin/env python3
# #-*-coding: utf-8 -*-
__author__ ="wqy";

import requests;
from bs4 import BeautifulSoup;
import re;

def get_history_news_data(url):
    """Fetch a news-listing page and scrape titles and article bodies.

    Each page row is a ``<tr>``; the title is a ``<font color="#ff9933">``
    element and the body is a ``<div>`` with a specific inline style.

    :param url: page URL to scrape (e.g. "http://cs.lsqn.cn/")
    :return: tuple ``(title_list, content_list)`` of stripped-tag text;
             both lists are empty when the request does not return 200.
    """
    # timeout prevents the call from hanging indefinitely on a dead server
    response = requests.get(url=url, timeout=10)
    title_list = []
    content_list = []
    if response.status_code == 200:
        # The site does not declare a reliable charset; trust the
        # encoding inferred from the response body instead.
        response.encoding = response.apparent_encoding

        bs = BeautifulSoup(markup=response.text, features="html.parser")
        tr_list = bs.find_all(name="tr")
        for tr in tr_list:
            title = tr.find(name="font", attrs={"color": "#ff9933"})
            # NOTE: the style string must match the page markup exactly
            # for bs4 attribute matching to succeed.
            content = tr.find(name="div", attrs={"style": "MARGIN-left: 10px;MARGIN-top:5px;MARGIN-right: 10px;font-size: 14px;line-height: 20px;"})
            if title:
                title_list.append(title.get_text())
            if content:
                content_list.append(content.get_text())

    return title_list, content_list

if __name__ == '__main__':
    # Script entry point: scrape the history-news listing page.
    get_history_news_data("http://cs.lsqn.cn/")



