#! /usr/bin/env python
#coding=utf-8
import urllib2
from BeautifulSoup import BeautifulSoup
import re


# Category pages live at .../column/<i>.html; indices 0 .. MAXCAT-1 are scanned.
url = 'http://www.nuoqiu.com/column/%d.html'
MAXCAT = 14

# Map each category index to the list of story titles found on its page.
all_story = {}
for i in range(MAXCAT):
    newUrl = url % i
    page = urllib2.urlopen(newUrl)
    soup = BeautifulSoup(page, fromEncoding="utf-8")
    # BUG FIX: the original called getStoryList(soup) here, but that function
    # is defined further down the file -- top-level code runs before the later
    # `def` executes, so this raised NameError.  The extraction is inlined
    # instead (same logic: text of every <a> inside the 'gxshow' div).
    story_list = [str(label.string)
                  for label in soup.find('div', {'class': 'gxshow'}).findAll('a')]
    # BUG FIX: story_list was a dead store overwritten each iteration and
    # all_story was never populated; keep the per-category result.
    all_story[i] = story_list

page = urllib2.urlopen("http://www.nuoqiu.com/column/2.html")
soup = BeautifulSoup(page,fromEncoding="utf-8")
f = file('a.txt','wb')
for incident in soup.find('div',{'class':'gxshow'}).findAll('li',{'class':'info'}):
    #f.write(str(incident.string)+'\n')
    b = re.compile(r'^\d{1}/(\d+)')
    print b.search(str(incident.string)).groups()[0]
    print str(incident.string)
f.close


# TODO: implement getPageNum(soup) -- parse the pagination info from a
# category page (the "<page>/<total>" counters used above).
def getStoryList(soup):
    story = []
    for label in soup.find('div',{'class':'gxshow'}).findAll('a'):
        story.append(str(label.string))
    return story

