#coding=utf-8
import urllib2
import BeautifulSoup
import gzip
import cStringIO
import os,sys,time

# --- Configuration ---------------------------------------------------------
# NOTE(review): real-looking account credentials are hardcoded here (and a
# session cookie below) -- they should live in a config file or environment
# variables and be rotated, not committed to source.
USER = "guoxiaokun@gmail.com"   # not referenced anywhere in this script
PASS = "May301977"              # not referenced anywhere in this script
NUMBER = 20000  # how many feed entries to request from Google Reader
# Google Reader atom endpoint wrapping the target Twitter user's RSS timeline.
URL = ("http://www.google.com/reader/atom/feed/"
       "http://twitter.com/statuses/user_timeline/15527964.rss?n=%d" % NUMBER)
HOST = "www.google.com"  # not referenced anywhere in this script

# HTTP request headers impersonating a desktop Chrome browser so Google
# Reader serves the feed.  The Cookie header carries a pre-baked Google
# session.  NOTE(review): a hardcoded session cookie will expire and is a
# security liability -- consider authenticating at runtime instead.
params = {
    "Connection":      "keep-alive",
    "User-Agent":      "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.224 Safari/534.10",
    "Accept":          "application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5",
    "Accept-Encoding": "gzip,deflate,sdch",
    "Accept-Language": "zh-CN,zh;q=0.8",
    "Accept-Charset":  "GBK,utf-8;q=0.7,*;q=0.3",
    "Cookie":          "GRLD=zh-CN:11876543968027729218; rememberme=true; MPRF=H4sIAAAAAAAAAKv49WL6_n5VAKDHzPcHAAAA; PREF=ID=4990742656fd5bd9:U=81f424a01584c2bf:FF=0:LD=en:NW=1:CR=2:TM=1220406813:LM=1288663968:DV=AA:GM=1:S=3FEClzCt-QEac5iB; NID=40=j9tFRtoZcmPlpJV--2HhNo_eWYcl-c4M5DFAPgrsJN0goN5yoU2fsyaa-LiqwSQzCsiw8aHydeXTfXBKA_OSV9EBv9oLSqPY86-GrNkkTU2WVqFaoBrdkOOpXQXXADlV; S=googlebase=HI4999kDLybmhGve8Nv7kw; HSID=AQmAnG0P0fDcS2WyH; SID=DQAAAMcAAAB9xbGGmtHvse2k26SB7XLVTz6cfyuY-3zr4su9XDDiU3k-HSy3G-Y5T_vTsf9n6lQzpTfCSDMMlkPFPdbLnrGlvCTB4ubP8-Gm8N0CJFvhL9uoyKxeHgx6GZPfw8nDYX6ZVvWpGCjuL2ZqCiLruBtYBlmaicRf6ZYeiJ5qxymJn0t7rpbOeudIcGWWnDgPc0qS1AWVVhkEU5S1JHVHwgFFDVVYMdT6GsZkA7kxGctEM8EIJF6EcHSIQesRm-LtVb2xa0arjv7xj5Za4xmmRI8F",
}
# --- Fetch the feed --------------------------------------------------------
# Download the (typically gzip-compressed) atom feed and decompress it into
# `html` for the parsing step below.
req = urllib2.Request(URL, None, params)
res = urllib2.urlopen(req)
try:
    raw = res.read()
    # We advertise gzip/deflate/sdch above but can only decode gzip, so gate
    # on the actual Content-Encoding instead of unconditionally gunzipping
    # (a plain-text response would make GzipFile.read raise IOError).
    if res.info().get("Content-Encoding") == "gzip":
        html = gzip.GzipFile(fileobj=cStringIO.StringIO(raw)).read()
    else:
        html = raw
finally:
    res.close()  # release the connection even if read/decompress fails
#print html

# --- Parse ------------------------------------------------------------------
# Collect the text of every even-indexed <title type="html"> element; per
# the original logic, only those carry the entries we want (the odd-indexed
# ones are skipped).  Slicing with [::2] replaces the manual
# xrange(len(...)) loop with an i % 2 == 0 test -- same elements selected.
soup = BeautifulSoup.BeautifulSoup(html)
titles = soup.findAll('title', type='html')
data = [title.string for title in titles[::2]]


