#!/usr/bin/python
# -*- coding: utf-8 -*-

import requests
import time
from bs4 import BeautifulSoup

from SpiderDataSink import *

# HTTP request headers: spoof a desktop Chrome browser (plus a Baidu referer)
# so the site serves the normal page to the scraper.
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
    'Accept':'text/html;q=0.9,*/*;q=0.8',
    'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
    'Accept-Encoding':'gzip',
    'Connection':'close',
    'Referer':'http://www.baidu.com/link?url=_andhfsjjjKRgEWkj7i9cFmYYGsisrnm2A-TN3XZDQXxvGsM9k9ZZSnikW2Yds4s&amp;amp;wd=&amp;amp;eqid=c3435a7d00146bd600000003582bfd1f'
}

spider_url = "https://www.solidot.org"
# timeout prevents the script from hanging forever if the server stalls
rq = requests.get(url=spider_url, headers=headers, timeout=30)
print("StatusCode:" , str(rq.status_code) , ", Encoding:" , rq.encoding)


# Parse the fetched page; each news item lives in a <div class="block_m">.
html = rq.content
sd = BeautifulSoup(html,'html.parser')
blocks = sd.find_all('div',attrs={'class':'block_m'})

def slim_text(inText):
    """Strip leading and trailing whitespace from the given string."""
    stripped = inText.strip()
    return stripped
    
def fill_url(inText):
    """Prefix a site-relative href with the site base URL.

    Returns "" when the href is empty (or missing/None), so callers never
    get a bare base URL for an item without a link.
    """
    if not inText:
        return ""
    return spider_url + inText

for block in blocks:
    # Headline text has the shape "<category>:<title>". Split on the FIRST
    # colon only, so a title that itself contains ':' is not truncated
    # (an unbounded split would silently drop everything after the second colon).
    title = slim_text(block.find('div',attrs={'class':'bg_htit'}).get_text())
    Category = "其它"  # default category ("other") when no prefix is present
    parts = title.split(':', 1)
    if len(parts) > 1:
        Category = parts[0].strip()
        title = parts[1].strip()

    content = slim_text(block.find('div',attrs={'class':'p_mainnew'}).get_text())
    link_url = fill_url(block.find('div',attrs={'class':'l'}).find('a')['href'])
    # FIXME : failed to get publish time

    print("=================================================")
    print("Category: " + Category)
    print("title: " + title)
    print("content: " + content)
    print("link_url: " + link_url)
    print("=================================================")

    dataSink({'category': Category, 'title': title, 'content': content, 'link_url': link_url, 'origin': 'solidot'})

