#!/usr/bin/python
# -*- coding:utf-8 -*-
__author__ = 'leiyuany'

import ConfigParser
import urllib2
import urllib
from bs4 import BeautifulSoup
import re

BASE_URL = "https://help.aliyun.com"
INDEX_URL = "https://help.aliyun.com/noticelist/9004748.html"

# Absolute path of the config file holding crawl state (last href/title and
# the comma-separated list of already-seen article ids).  Raw string so the
# Windows backslashes can never be misread as escape sequences (the original
# non-raw literal only worked because \P, \c, \d happen not to be escapes).
CONFIG_DATA_FILE = r"E:\Project\crawl_test\config\data.conf"

cf = ConfigParser.SafeConfigParser()
cf.read(CONFIG_DATA_FILE)

def get_config_last_data():
    href = cf.get("data","href")
    title = cf.get("data","title")
    articleid = cf.get("data","articleids").strip()
    print articleid
    articleids = []
    articleids = articleid.split(",")

    return href, title, articleids

def set_config_data(href, title, articleids):
    """Persist the latest crawl state into the [data] section.

    Args:
        href: last processed notice link.
        title: its title.
        articleids: list of id strings, or an already-joined string.
            SafeConfigParser.set() only accepts strings, so a list is
            joined with "," to match the format get_config_last_data()
            splits on (the original raised TypeError on a list).
    """
    if isinstance(articleids, list):
        articleids = ",".join(articleids)
    cf.set("data", "href", href)
    cf.set("data", "title", title)
    cf.set("data", "articleids", articleids)
    # NOTE(review): reads use the absolute path configured at module level,
    # but this writes to cwd-relative "data.conf" — confirm the working
    # directory is meant to be the config directory.
    with open("data.conf", "w") as fp:  # with-block closes the handle (original leaked it)
        cf.write(fp)



def soup(url):
    """Fetch *url* over HTTP and return its body parsed as a BeautifulSoup tree.

    Args:
        url: absolute URL to download.

    Returns:
        BeautifulSoup object built with the "html.parser" backend.
    """
    req = urllib2.Request(url)
    response = urllib2.urlopen(req)
    try:
        the_page = response.read()
    finally:
        # Original never closed the response, leaking the HTTP connection.
        response.close()
    return BeautifulSoup(the_page, "html.parser")

def start():
    href, title, articleids = get_config_last_data()

    page_source = soup(INDEX_URL)
    gonggaoids = page_source.findAll(href = re.compile(r"noticelist/articleid"))
    #print gonggaoids
    for index,item in enumerate(gonggaoids):
        title = item.text
        href = item["href"]
        #gonggaoid = re.compile(r'\d{8}').findall(href)
        gonggaoid = re.search(re.compile(r'\d{8}'),href).group()
        #print gonggaoid

        if gonggaoid in articleids:
            print "沃尔分开少了"
            break
        else:
            print "===============新公告==================="
            href_url =BASE_URL + href
            print "公告详情：",href_url
            articleids.append(gonggaoid)
            set_config_data(href,title,articleids)
            send_body = soup(href_url)

            print send_body












# Run the crawl only when executed as a script, not on import.
if __name__ == "__main__":
    start()