#!/usr/bin/env python
# -*- coding: cp936 -*-

"""Get proxies from urls, and test their speed"""
import re, time, threading, sys, os
import codecs
import HTMLParser, urllib, urlparse
import BeautifulSoup

##import sys
##streamWriter = codecs.lookup('utf-8')[-1]
##sys.stdout = streamWriter(sys.stdout)


fromurl = """http://www.euchn.com/info/mobile/index.asp?mp="""


numset = set(open('haoma').readlines())
numlist = list()
for num in numset:
    numlist.append(num.strip())
numlist.sort()

try:
    outfile = open('xj.txt', 'r')
    print 'output file xj.txt exist, clear it? [y/n]',
    prm = raw_input()
    outfile.close()
    print prm
    if prm.lower().find('y')!=-1:
        outfile=open('xj.txt', 'w')
        print 'clear old'
    else:
        outfile=open('xj.txt', 'a')
        print 'append to old'
except IOError:
    outfile = open('xj.txt', 'w')
        
# 19 category buckets (indices 0-18) for scraped values; the interrupt
# handler below dumps cator[3] and cator[5] to the output file.
# NOTE(review): nothing fills these buckets any more -- the code that did
# is commented out -- so they stay empty sets.
# Idiom fix: build the list of sets directly instead of creating a
# range() list and overwriting every element in a loop.
cator = [set() for _ in range(1, 20)]


    
##    for i in range(2,6,2):
##        key = r1.match(str(all_td[i])).group(1)
##        value = r1.match(str(all_td[i+1])).group(1)
##        if key and value:
##            data[key] = value
##        cator[i+1].add(value+'\n')
##        print key, ":", value
##    tw = []
##    for key,value in data.items():
##        tw[0:0] = [str(key), str(value)+'\n']
##    tw[0:0] = [str(num) +'\n',]
##    tw.append('\n')
##    outfile.writelines(tw)


try:
    for i in range(len(numlist)):
        tup = process(numlist[i])
        outfile.write("%5s %s (%d/%d)"% (tup[0],tup[1], i, len(numlist)))
        if (int(i)+1)%5 == 0:
            outfile.write(os.linesep)
except KeyboardInterrupt:
    outfile.writelines(cator[3])
    outfile.writelines(cator[5])
    outfile.close()

def genRange(min,max):
    while min <= max:
        yield min
        min +=1

def process(num):
    f = urllib.urlopen(fromurl+str(num))
    b = BeautifulSoup.BeautifulSoup(f)
    all_td = b.findAll('td', bgcolor = "#EFF1F3")
    r1 = re.compile('"red">(.*)</font')
    top =  str(all_td[3])
    value = r1.findall(top)[0]
    print num, value
    return (num, value)

##    get = raw_input()
    
##    url = anchor.get('href')
##    if url is None or url in seen: continue
##    seen.add(url)
##    pieces = urlparse.urlparse(url)
##    if pieces[0]=='http':
##        print urlparse.urlunparse(pieces)

##
##
##BUFSIZE = 8192
##while True:
##    data = f.read(BUFSIZE)
##    if not data: break
##    try:
##        p.feed(data)
##    except HTMLParser.HTMLParseError:
##        print "error here"
##    else:
##        print "!!!!!!!!!!!11"
##try:
##    p.close( )
##except HTMLParser.HTMLParseError:
##    print "close error!"


##def getu(url, fencode='cp936'):
##    f = urllib.urlopen(url)
##    f = codecs.open(f, 'r', fencode)
##    return u"".join(f)
##
##s = raw_input()
##fc = open("pppp.txt")
##fc = codecs.EncodedFile(fc, 'utf-8', 'cp936')
##while s.upper() != 'STOP':
##    print "u entered :" ,s
##    line = unicode(fc.next(),'utf-8')
##    first = line.find('"')
##    end = line.find('"', first+1)
##    print "file(%s, from %d to %d) has :" %(line.__class__, first,end), line
##    s = raw_input()
##
