#!/usr/bin/env python
#coding=utf-8

from ftplib import FTP
from sgmllib import SGMLParser
import codecs
import httplib
import re
import socket
import time
import urllib
import urllib2
import xml.dom.minidom as minidom
import xml.etree.ElementTree as ET

#socket.setdefaulttimeout(60)
#urllib2.socket.setdefaulttimeout(10)

class item:
    """Record for one scraped video entry.

    Fields:
      title     -- display title collected from the listing page
      link      -- absolute page URL of the video
      img       -- thumbnail image URL
      downloads -- direct download URLs resolved later by fetchLink()
    """
    def __init__(self):
        self.title = ""
        self.link = ""
        self.img = ""
        self.downloads = []

    def reset(self):
        """Clear every field so the instance can be reused for the next entry.

        Also re-creates ``downloads`` (previously left untouched here), so
        links resolved for one entry can no longer leak into the next.
        """
        self.title = ""
        self.link = ""
        self.img = ""
        self.downloads = []
'''        
class ListItem(SGMLParser):      
    def reset(self):
        SGMLParser.reset(self)
        self.items = []
        self.item = item()
        self.is_ul = ""
        
    def start_a(self,attrs):
        if self.is_ul:
            href = [v for k, v in attrs if k == 'href']
            if href and self.item.link == "":
                self.item.link = href[0]
    
    def start_img(self, attrs):
        if self.is_ul:
            img = [v for k, v in attrs if k == 'src']
            if img and self.item.img == "":
                self.item.img = img[0]
                
            title = [v for k, v in attrs if k == 'alt']
            if title and self.item.title == "":
                self.item.title = title[0]
        
    def start_ul(self, attrs):
        ul_class = [v for k, v in attrs if k == 'class']
        if len(ul_class) > 0 and ul_class[0] == 'v':
            self.is_ul = 1
                
    def end_ul(self):
        if self.is_ul == 1:
            temp = item()
            temp.img = self.item.img
            temp.link = self.item.link
            temp.title = self.item.title
            self.items.append(temp)
        
        self.is_ul = ""
        self.item.reset()
        
#    def handle_data(self, text):
#        if self.is_ul:
#            self.item
#        if self.is_h4 == 1:
#            self.size += 1
#            urlitem = item()
#            urlitem.size = self.size
#            urlitem.name = text
#            self.name.append(urlitem)
'''
        
class ListItem(SGMLParser):
    """SGML parser that extracts video entries from a v.qq.com listing page.

    Each entry on the page is assumed to be a ``<div class="mod_pic">``
    (thumbnail image) followed by a ``<ul class="mod_data">`` (title text and
    link) -- TODO confirm against the live markup.  Completed entries are
    appended to ``self.items`` as ``item`` instances.

    State flags (all use "" as false and 1 as true):
      is_div -- currently inside a mod_pic <div>
      dived  -- a mod_pic <div> has been closed (thumbnail captured)
      is_ul  -- currently inside the mod_data <ul> that follows it
      uled   -- that <ul> has been closed (entry emitted)
    """
    def reset(self):
        # Re-initialize parser state; called by SGMLParser.__init__ too.
        SGMLParser.reset(self)
        self.items = []
        self.item = item()
        self.is_ul = ""
        self.is_div = ""
        self.dived = ""
        self.uled = ""

    def start_a(self,attrs):
        # Only anchors inside the mod_data <ul> (after the mod_pic <div>
        # closed) carry the entry link; keep the first href seen and make
        # it absolute.
        if self.is_ul and self.dived and self.is_div == "":
            href = [v for k, v in attrs if k == 'href']
            if href and self.item.link == "":
                self.item.link = "http://v.qq.com"+href[0]

    def handle_data(self, text):
        # Accumulate all text inside the mod_data <ul> as the entry title.
        if self.is_ul and self.dived and self.is_div == "":
            self.item.title += text;

    def start_img(self, attrs):
        # Inside the mod_pic <div>: first img src becomes the thumbnail,
        # and its alt text seeds the title if nothing was collected yet.
        if self.is_div:
            img = [v for k, v in attrs if k == 'src']
            if img and self.item.img == "":
                self.item.img = img[0]

            title = [v for k, v in attrs if k == 'alt']
            if title and self.item.title == "":
                self.item.title += title[0]+" "

    def start_div(self, attrs):
        # A mod_pic <div> starts a new entry: clear the "seen" markers so
        # the following <ul> is matched to this thumbnail.
        div_class = [v for k, v in attrs if k == 'class']
        if len(div_class) > 0 and div_class[0] == 'mod_pic':
            self.is_div = 1
            self.uled = ""
            self.dived = ""

    def end_div(self):
        # Leaving the mod_pic <div>; remember that the thumbnail part is done.
        if self.is_div == 1:
            self.is_div = ""
            self.dived = 1

    def start_ul(self, attrs):
        # Accept only the first mod_data <ul> after a closed mod_pic <div>.
        if self.dived and self.uled == "":
            ul_class = [v for k, v in attrs if k == 'class']
            if len(ul_class) > 0 and ul_class[0] == 'mod_data':
                self.is_ul = 1

    def end_ul(self):
        # End of the mod_data <ul>: snapshot the accumulated fields into a
        # fresh item and emit it, then reset the working item either way.
        if self.is_ul == 1:
            self.is_ul = ""
            self.uled = 1
            temp = item()
            temp.img = self.item.img
            temp.link = self.item.link
            temp.title = self.item.title
            self.items.append(temp)
        self.is_ul = ""
        self.item.reset()
            
'''        
        if self.is_ul == 1:
            temp = item()
            temp.img = self.item.img
            temp.link = self.item.link
            temp.title = self.item.title
            self.items.append(temp)        
        self.is_ul = ""
        self.item.reset()        
'''
                
address = 'http://www.flvcd.com/parse.php?'
agent = 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3'
def fetchLink(url, downloads):
    """fetch link for the input parameters"""
    para = {'flag' : '', 'format' : '', 'kw' : url}
    req = address + urllib.urlencode(para)
#    print req
#    opener = urllib2.build_opener()
#    opener.addheaders = [('User-agent', agent)]
#    data = opener.open(req).read()
#    page = urllib.urlopen(req)
#    data = page.read()
 
#    req = address + urllib.urlencode(para)
    print req
#    opener = urllib2.build_opener()
#    opener.addheaders = [('User-agent', agent)]

    request = urllib2.Request(req)
    #request.add_header('User-agent', 'Mozilla/5.0 (Linux i686)')

    #requrequest = urllib2.Request("http://vote.activepower.net/script/user/get_vote_save.asp")
    request.add_header('User-Agent','Mozilla/5.0 (X11; U; Linux i686; it-IT; rv:1.9.0.2) Gecko/2008092313 Ubuntu/9.04 (jaunty) Firefox/3.5')
    opener = urllib2.build_opener()
    try:
        data = opener.open(request).read()
    except:
        print "time out to next!!!"
        return    
    # print re.findall("if\(copyToClipboard\('(http.*)'\)\)\{alert",data)
    links = re.findall("(http://.*)\s*<[A-Z]>",data)
#    info = re.findall(".{5,20}(.*?)",data)[0]
    for (index,link) in enumerate(links):
#        print link
##        conn = httplib.HTTPConnection("f.youku.com")
        #print download
##        conn.request("HEAD", link)
##        res = conn.getresponse()
        #print res.getheaders()
##        resUrl=res.getheaders()[3][1]
        downloads.append(link)
#        Popen(['wget','-c', link, '-U', agent,
#        '-O', info + '_' + str(index+1) + '.flv']).wait()

def itemsToXML(items):
    """Serialize scraped entries into a UTF-8 <metaList> XML document.

    items -- iterable of objects with .title, .img, .link and .downloads

    Returns the raw serialized document (including the XML declaration)
    as produced by ElementTree.
    """
    root = ET.Element("metaList")
    for entry in items:
        meta = ET.SubElement(root, "meta")
        ET.SubElement(meta, "title").text = entry.title
        ET.SubElement(meta, "img").text = entry.img
        ET.SubElement(meta, "link").text = entry.link
        # One <download> element per resolved direct link.
        for url in entry.downloads:
            ET.SubElement(meta, "download").text = url
    return ET.tostring(root, 'utf-8')
        
# --- Script body: scrape the latest-videos listing, resolve download links
# --- for every entry, then POST the combined XML to the collector service.
content = urllib2.urlopen('http://v.qq.com/news/latest/all_1.html').read()
listitem = ListItem()
listitem.feed(content)

# NOTE(review): the loop variable shadows the module-level class `item`;
# harmless here because the class is not referenced after this point.
for item in listitem.items:
    print "========"
    print item.title
    print item.img
    print item.link
    fetchLink(item.link, item.downloads)

#    break
#    break
    # Throttle requests to flvcd.com between entries.
    time.sleep(2)

    for download in item.downloads:
        print download
    print "========"
#    break
#        conn = httplib.HTTPConnection("f.youku.com")
        #print download
#        conn.request("HEAD", download)
#        res = conn.getresponse()
        #print res.getheaders()
#        resUrl=res.getheaders()[3][1]
#        print resUrl 

#    print "========"
#    break

item_xml = itemsToXML(listitem.items)
print item_xml

# POST the XML document to the remote receiver, keeping any cookies the
# server sets during the exchange.
cookies = urllib2.HTTPCookieProcessor()
opener = urllib2.build_opener(cookies)        
request = urllib2.Request(
#                          url     = 'http://kankanxiaxia.5gbfree.com/response.php',
                          url     = 'http://xiaxia.sinaapp.com/response.php',
                          headers = {'Content-Type' : 'text/xml'},
                          data    = item_xml)
    
#   f = urllib2.urlopen(req)
f1 = opener.open(request)    

# Echo the server's response for manual inspection.
print f1.read()


'''
filename = "meta.xml"
f = open(filename, "wb")
writer = codecs.lookup("utf-8")[3](f)
item_xml.writexml(writer,  indent="\t", addindent="\t", newl="\n", encoding = "utf-8")
writer.close()
f.close()
    
ftp = FTP()
ftp.set_debuglevel(2)
#ftp.connect(host, port, timeout)
ftp.connect("kankanxiaxia.5gbfree.com", "21", 60)
ftp.login("kankanxiaxia", "lingqq")
#print ftp.getwelcome()
#print ftp.dir()
#ftp.dir()
bufsize = 1024
file_handler = open(filename, 'rb')
ftp.storbinary("STOR meta.xml", file_handler, bufsize)
ftp.set_debuglevel(0)
file_handler.close()
ftp.quit()
'''