import cookielib
import urllib
import urllib2

# Default request headers mimicking a Firefox 3.6 browser so the site
# serves its normal HTML pages to this script.
headers = {
    'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; zh-CN; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'zh-cn,zh;q=0.5',
    'Accept-Encoding': 'gzip,deflate',
    # BUG FIX: the value previously began with a duplicated
    # "Accept-Charset: " prefix, yielding a malformed header value.
    'Accept-Charset': 'GB2312,utf-8;q=0.7,*;q=0.7',
    'Keep-Alive': '115',
    'Connection': 'close',
}

# Shared cookie jar + opener: the session cookie obtained at login is
# automatically reused by every later request made through `opener`.
cj = cookielib.CookieJar()
cookie_support = urllib2.HTTPCookieProcessor(cj)
opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
opener.addheaders = [(k, v) for k, v in headers.iteritems()]

# NOTE(review): this import-time POST duplicates login() below and performs
# network I/O as a module side effect; kept for backward compatibility.
# SECURITY: credentials are hard-coded in source -- move to config/env.
url = 'https://www.domainmonster.com/login/'
postdata = urllib.urlencode({'action':'dologin', 'username':'weber110@gmail.com', 'password':'5rihaigong'})
content = opener.open(url, postdata).read()

def login():
    #urllib2.install_opener(opener)
    url = 'https://www.domainmonster.com/login/'
    postdata = urllib.urlencode({'action':'dologin', 'username':'weber110@gmail.com', 'password':'5rihaigong'})
#    req = urllib2.Request(
#                          url=url,
#                          data=postdata,
#                          headers = headers
#                          )
#    return urllib2.urlopen(req).read()
    content = opener.open(url, postdata).read()
    print cj.__dict__

def backorder():
    """POST a backorder search for each name listed in the 'dm-not' file.

    Each line of 'dm-not' is a domain name; only the label before the first
    dot is submitted, hard-wired to the .cc TLD (t=16).

    Returns the list of response bodies (the original built the list and
    then discarded it by returning None) so callers can inspect results.
    """
    url = 'http://www.domainmonster.com/backorder/'
    responses = []
    # `with` fixes the file-handle leak in the original (file never closed).
    with open('dm-not') as names:
        for line in names:
            postdata = urllib.urlencode({'q': line.split('.')[0], 'td': '.cc', 't': '16'})
            responses.append(opener.open(url, postdata).read())
    return responses

def basket():
    """Add the hard-coded 'dandong.cc' backorder item to the basket.

    Returns the response body; when the server answers with an HTTP error
    status, the error page's body is returned instead of raising.
    """
    url = 'http://www.domainmonster.com/basket/'
    postdata = urllib.urlencode({'action':'addbackorderitem', 'nm':'dandong', 'tld':'cc', 'iTLD':'16', 'return':'/domain-registration/'})
    try:
        return opener.open(url, postdata).read()
    except urllib2.HTTPError, error:
        # HTTPError doubles as a response object: surface the server's
        # error page so the caller can see why the request was rejected.
        return error.read()

def crawler():
    """Run the scraping sequence.

    Currently only the login step is active; the backorder() and basket()
    steps (and the debug dumps of each page to f1/f2/f3.htm) were disabled
    in the original and have been removed as dead code.
    """
    login()

def query():
    from pyquery import PyQuery as pq
    from lxml import etree
    f = open('f2.html')
    content = f.read()
    d = pq(content)
    print d('#boxes').find('form').eq(0).find('input').eq(0).val()

if __name__ == '__main__':
    # Script entry point: run the live crawl. query() instead parses a
    # previously saved page offline; swap the calls to debug parsing.
    crawler()
    #query()
    
