#!/usr/bin/env python
#coding=utf-8
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# @author: Holy
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util

#from ftplib import FTP
from sgmllib import SGMLParser
import codecs
import httplib
import re
import socket
import time
import urllib
import urllib2
import xml.dom.minidom as minidom
import xml.etree.ElementTree as ET

#urllib2.socket.setdefaulttimeout(25)
#import socket 
#socket.setdefaulttimeout(10)
class item:
    """Mutable record for one scraped video entry."""
    def __init__(self):
        # downloads survives reset() on purpose: links resolved later are
        # appended here and must not be wiped between parses.
        self.downloads = []
        self.reset()
    def reset(self):
        # Clear the per-entry scraped fields (but keep downloads).
        self.title = self.link = self.img = ""
'''        
class ListItem(SGMLParser):      
    def reset(self):
        SGMLParser.reset(self)
        self.items = []
        self.item = item()
        self.is_ul = ""
        
    def start_a(self,attrs):
        if self.is_ul:
            href = [v for k, v in attrs if k == 'href']
            if href and self.item.link == "":
                self.item.link = href[0]
    
    def start_img(self, attrs):
        if self.is_ul:
            img = [v for k, v in attrs if k == 'src']
            if img and self.item.img == "":
                self.item.img = img[0]
                
            title = [v for k, v in attrs if k == 'alt']
            if title and self.item.title == "":
                self.item.title = title[0]
        
    def start_ul(self, attrs):
        ul_class = [v for k, v in attrs if k == 'class']
        if len(ul_class) > 0 and ul_class[0] == 'v':
            self.is_ul = 1
                
    def end_ul(self):
        if self.is_ul == 1:
            temp = item()
            temp.img = self.item.img
            temp.link = self.item.link
            temp.title = self.item.title
            self.items.append(temp)
        
        self.is_ul = ""
        self.item.reset()
        
#    def handle_data(self, text):
#        if self.is_ul:
#            self.item
#        if self.is_h4 == 1:
#            self.size += 1
#            urlitem = item()
#            urlitem.size = self.size
#            urlitem.name = text
#            self.name.append(urlitem)
'''
        
class ListItem(SGMLParser):
    """SGML parser that extracts video entries from a v.qq.com listing page.

    Expected markup per entry: a <div class="mod_pic"> holding the thumbnail
    <img> (src + alt), followed by a <ul class="mod_data"> holding the page
    link and title text.  Each completed entry is appended to self.items as
    an `item` instance.
    """
    def reset(self):
        SGMLParser.reset(self)
        self.items = []     # completed item objects, in page order
        self.item = item()  # entry currently being assembled (reused)
        # State flags: empty string acts as False, 1 acts as True.
        self.is_ul = ""     # currently inside the target <ul class="mod_data">
        self.is_div = ""    # currently inside the target <div class="mod_pic">
        self.dived = ""     # the mod_pic div for this entry has closed
        self.uled = ""      # the mod_data ul for this entry has closed

    def start_a(self,attrs):
        # Take the first href seen inside the <ul> that follows the div.
        if self.is_ul and self.dived and self.is_div == "":
            href = [v for k, v in attrs if k == 'href']
            if href and self.item.link == "":
                # hrefs on the page are site-relative; make them absolute.
                self.item.link = "http://v.qq.com"+href[0]

    def handle_data(self, text):
        # Accumulate title text chunks while inside the target <ul>.
        if self.is_ul and self.dived and self.is_div == "":
            self.item.title += text;

    def start_img(self, attrs):
        # Thumbnail src and alt-text come from the <img> in the mod_pic div.
        if self.is_div:
            img = [v for k, v in attrs if k == 'src']
            if img and self.item.img == "":
                self.item.img = img[0]

            title = [v for k, v in attrs if k == 'alt']
            if title and self.item.title == "":
                self.item.title += title[0]+" "

    def start_div(self, attrs):
        div_class = [v for k, v in attrs if k == 'class']
        if len(div_class) > 0 and div_class[0] == 'mod_pic':
            # A new entry begins: clear the div-closed / ul-closed markers.
            self.is_div = 1
            self.uled = ""
            self.dived = ""

    def end_div(self):
        if self.is_div == 1:
            self.is_div = ""
            self.dived = 1

    def start_ul(self, attrs):
        # Only the first mod_data <ul> after a mod_pic div is interesting.
        if self.dived and self.uled == "":
            ul_class = [v for k, v in attrs if k == 'class']
            if len(ul_class) > 0 and ul_class[0] == 'mod_data':
                self.is_ul = 1

    def end_ul(self):
        if self.is_ul == 1:
            self.is_ul = ""
            self.uled = 1
            # Snapshot the assembled fields into a fresh item: self.item is
            # reused across entries, so it cannot be appended directly.
            temp = item()
            temp.img = self.item.img
            temp.link = self.item.link
            temp.title = self.item.title
            self.items.append(temp)
        self.is_ul = ""
        self.item.reset()
            
'''        
        if self.is_ul == 1:
            temp = item()
            temp.img = self.item.img
            temp.link = self.item.link
            temp.title = self.item.title
            self.items.append(temp)        
        self.is_ul = ""
        self.item.reset()        
'''
                
address = 'http://www.flvcd.com/parse.php?'
agent = 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.8; en-US; rv:1.9.1.3) Gecko/20100822 Firefox/3.6.5'
def fetchLink(url, downloads):
    """Resolve direct download URLs for a video page via flvcd.com.

    Appends every resolved http:// link to the caller-supplied `downloads`
    list and returns None.  Network failures are deliberately swallowed
    (best effort) so one bad page does not abort the whole listing.

    url       -- video page URL, passed as flvcd's `kw` query parameter
    downloads -- list that receives the resolved links (mutated in place)
    """
    para = {'flag': '', 'format': '', 'kw': url}
    request = urllib2.Request(address + urllib.urlencode(para))
    # flvcd.com rejects requests that lack a browser-like User-Agent.
    request.add_header('User-Agent', agent)

    try:
        data = urllib2.urlopen(request).read()
    except Exception:
        # Best effort: skip pages that time out or error out.  Was a bare
        # `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        return

    # flvcd embeds each direct link as "http://... <B>"-style markup;
    # capture the URL portion before the single-letter tag.
    for link in re.findall("(http://.*)\s*<[A-Z]>", data):
        downloads.append(link)

def itemsToXML(items):
    """Serialize a list of `item` objects into a UTF-8 <metaList> XML string.

    Each entry becomes a <meta> element with <title>, <img>, <link> children
    plus one <download> child per resolved download link.
    """
    root = ET.Element("metaList")
    for entry in items:
        meta = ET.SubElement(root, "meta")
        # Titles arrive from the parser as UTF-8 byte strings; decode so
        # ElementTree can re-encode them consistently on output.
        ET.SubElement(meta, "title").text = unicode(entry.title, 'utf-8')
        ET.SubElement(meta, "img").text = entry.img
        ET.SubElement(meta, "link").text = entry.link
        for url in entry.downloads:
            ET.SubElement(meta, "download").text = url
    return ET.tostring(root, 'utf-8')
        
# NOTE(review): a blocking network fetch of the listing page used to run here
# at import time, feeding a module-level ListItem whose results were never
# read (MainHandler.get re-fetches and re-parses on every request).  The dead
# eager I/O was removed so importing this module no longer hits the network.

#for item in listitem.items:
#    print "========"
#    print item.title[0].decode('utf8').encode('utf8')
#    print item.img
#    print item.link
#    fetchLink(item.link[0], item.downloads)
#    for download in item.downloads:
#        print download
#    print "========"

class MainHandler(webapp.RequestHandler):
    """Scrapes the v.qq.com latest-video listing, echoes the first entries to
    the response, and POSTs the full metadata as XML to a remote receiver."""

    def get(self):
        # Fetch the listing page with a browser-like User-Agent (the site
        # rejects the default urllib2 agent string).
        opener = urllib2.build_opener()
        req = 'http://v.qq.com/news/latest/all_1.html'
        opener.addheaders = [('User-agent', agent)]
        content = opener.open(req).read()

        listitem = ListItem()
        listitem.feed(content)

        count = 0
        # `entry` (was `item`) avoids shadowing the module-level item class.
        for entry in listitem.items:
            # The original did title.decode('utf8').encode('utf8') — a no-op
            # round-trip for valid UTF-8 — so the title is written directly.
            self.response.out.write(entry.title)
            self.response.out.write("<br>")
            self.response.out.write(entry.img)
            self.response.out.write("<br>")
            self.response.out.write(entry.link)
            fetchLink(entry.link, entry.downloads)
            # These three lines used tab indentation in the original (mixed
            # tabs/spaces); normalized to spaces, same logic.
            for download in entry.downloads:
                self.response.out.write("<br>")
                self.response.out.write(download)
            count += 1
            # Stop after two entries — presumably to stay inside GAE request
            # time limits; TODO confirm the intended cap.
            if count % 2 == 0:
                break
            self.response.out.write("<br>========================<br>")

        # POST the collected metadata as XML to the remote receiver and echo
        # its reply into the response.
        item_xml = itemsToXML(listitem.items)
        cookies = urllib2.HTTPCookieProcessor()
        opener = urllib2.build_opener(cookies)
        request = urllib2.Request(
                                  url     = 'http://kankanxiaxia.5gbfree.com/response.php',
                                  headers = {'Content-Type' : 'text/xml'},
                                  data    = item_xml)

        f = opener.open(request)
        self.response.out.write("<br>")
        self.response.out.write(f.read())
			
'''			
	    item_xml = itemsToXML(listitem.items)
	    filename = "meta.xml"
	    f = open(filename, "wb")
	    writer = codecs.lookup("utf-8")[3](f)
	    item_xml.writexml(writer,  indent="\t", addindent="\t", newl="\n", encoding = "utf-8")
	    writer.close()
	    f.close()
    
	    ftp = FTP()
	    ftp.set_debuglevel(2)
	    ftp.connect("kankanxiaxia.5gbfree.com", "21", 60)
	    ftp.login("kankanxiaxia", "lingqq")
	    bufsize = 1024
	    file_handler = open(filename, 'rb')
	    ftp.storbinary("STOR meta.xml", file_handler, bufsize)
	    ftp.set_debuglevel(0)
	    file_handler.close()
	    ftp.quit()
'''
def main():
    """WSGI entry point: route '/' to MainHandler and run the app."""
    util.run_wsgi_app(
        webapp.WSGIApplication([('/', MainHandler)], debug=True))

if __name__ == '__main__':
    main()
