#!python2
# -*- coding: utf-8 -*-
"""Daily scraper for www.ygdy8.net: collects today's movie/series updates
and appends them to a GBK-encoded CSV (name, image, description, urls)."""

import glob
import os
import sys

# BUG FIX: `os` was originally imported *after* this line, so os.path.join
# raised NameError at startup. Extend sys.path with bundled .egg archives
# next to the interpreter BEFORE importing the packages below.
sys.path += glob.glob(os.path.join(os.path.dirname(sys.executable), "*.egg"))

import re
import time
import urlparse

import cssselect
import lxml
import lxml.etree
import lxml._elementpath
import pyquery.pyquery as pq

import util

# Site root to crawl.
url = "http://www.ygdy8.net/"

# Pages are served GBK-encoded; the CSV output is written in GBK as well.
input_coding = "gbk"
output_coding = "gbk"
# Seconds to sleep between detail-page fetches (be polite to the server).
time_to_wait = 3

# Section titles on the front page whose daily updates we collect.
resources = [u"最新电影下载",
        u"迅雷电影资源",
        u"华语剧集专区",
        u"日韩剧集专区",
        u"欧美剧集专区",
        u"迅雷综艺节目",
        u"迅雷动漫资源",
        ]

def get_p(u, o):
    """Fetch URL *u* with the module-global `http` client and save the
    response body to file path *o*.

    BUG FIX: the original left the file handle open if http.get raised;
    a context manager guarantees it is closed.
    """
    with open(o, "w") as fp:
        http.get(u, fp)

def get_table(fp):
    """Return the next <table>...</table> chunk from the file object *fp*.

    Lines are accumulated from the one containing "<table" (inclusive)
    through the one containing "/table>" (inclusive). Returns "" when the
    stream is exhausted before a closing tag is seen.
    """
    chunk = []
    collecting = False
    for line in iter(fp.readline, ""):
        if "<table" in line:
            collecting = True
        if "/table>" in line:
            # Closing line always recorded, then stop.
            chunk.append(line)
            break
        if collecting:
            chunk.append(line)
    return "".join(chunk)

def get_desc(fp):
    """Extract the description block between the "Content Start" marker
    and the GBK-encoded "下载地址" (download address) heading, then strip
    newlines and common inline HTML noise.

    NOTE: the marker is encoded to GBK because pages are read as raw
    GBK bytes (Python 2 str).
    """
    marker = u"下载地址".encode("gbk")
    pieces = []
    collecting = False
    while True:
        line = fp.readline()
        if not line:
            break
        if "Content Start" in line:
            collecting = True
            continue
        if marker in line:
            pieces.append(line)
            break
        if collecting:
            pieces.append(line)
    desc = "".join(pieces)
    # One pass per junk token; order matches the original replacement list.
    for junk in ("\r\n", "\n", "<p>", "</p>", "<br />", "<br>", "&nbsp;",
                 "</font", "</strong>", "</span>"):
        desc = desc.replace(junk, "")
    return desc

def get_title(fp):
    """Scan *fp* for the line containing "title_all" and return the movie
    title found in its h1/font element, encoded to the output coding.

    Returns an encoded empty string when no such line exists.
    """
    tt = ""
    while True:
        l = fp.readline()
        # BUG FIX: the original had no EOF check here; readline() returns
        # "" forever at end-of-file, so the loop spun infinitely whenever
        # a page lacked a "title_all" line.
        if not l:
            break
        if "title_all" in l:
            q = pq.PyQuery(l.decode(input_coding, errors="ignore"))
            t = q[0].find("h1/font")
            if t is not None:
                tt = t.text
                break
    return tt.encode(output_coding, errors="ignore")

def get_links(fp):
    """Collect download hrefs from every remaining table in *fp*.

    Each <table> chunk is parsed with PyQuery and the first
    tbody/tr/td/a anchor's href is taken, encoded to the output coding.
    Stops when get_table() yields nothing more.
    """
    found = []
    while True:
        chunk = get_table(fp)
        if not chunk:
            break
        doc = pq.PyQuery(chunk.decode(input_coding, errors="ignore"))
        anchor = doc[0].find("tbody/tr/td/a")
        if anchor is not None:
            found.append(anchor.attrib["href"].encode(output_coding,
                                                      errors="ignore"))
    return found

def get_u(f, day=None):
    """Parse the saved front page file *f* and return [(name, href), ...]
    for items dated *day* inside the tracked resource categories.

    *day* defaults to the module-global `now` (set under __main__), so the
    original one-argument call keeps working. Passing it explicitly fixes
    the original's hidden dependency on that global (NameError when the
    module was imported rather than run as a script).
    """
    if day is None:
        day = now

    def pick_rows(table_html):
        """Return (name, href) pairs from one decoded table's rows."""
        q = pq.PyQuery(table_html)
        picked = []
        for tr in q.children():
            d = tr.find("td/font")
            if d is not None:
                # The <font> cell carries the update date, e.g. "2014-08-26".
                if d.text == day:
                    a = tr.findall("td/a")
                    # a[0] is the category link, a[1] the item link.
                    if a[0].text in resources:
                        picked.append((a[1].text, a[1].attrib["href"]))
        return picked

    fp = open(f)
    links = []
    try:
        while True:
            t1 = get_table(fp)
            if not t1:
                break
            links += pick_rows(t1.decode(input_coding, errors="ignore"))
    finally:
        # BUG FIX: close the file even if parsing raises.
        fp.close()
    return links
def get_a():
    """Crawl the day's updates: fetch the front page, then each linked
    detail page, and append one CSV row (name, image, desc, urls) per item.

    Relies on module globals: `url`, `output`, `http` (via get_p) and `now`
    (via get_u). Commas inside fields are replaced with "_" because the CSV
    line is assembled by hand; multiple download URLs are joined with "#".

    BUG FIX: file handles are now managed with `with`, so they are closed
    even when parsing or writing raises.
    """
    tmp1 = "n.html"
    tmp2 = "n1.html"
    get_p(url, tmp1)
    for n, l in get_u(tmp1):
        u = urlparse.urljoin(url, l)
        get_p(u, tmp2)
        with open(tmp2) as fp:
            title = get_title(fp)
            desc = get_desc(fp)
            urls = get_links(fp)
        with open(output, "a") as fp:
            # image_url column is intentionally left empty.
            fp.write("%s,%s,%s,%s\n" % (title.replace(",", "_"), "",
                                        desc.replace(",", "_"),
                                        "#".join(urls).replace(",", "_")))
        # Throttle between detail-page fetches.
        time.sleep(time_to_wait)

if __name__ == "__main__":
    # Date filter: defaults to today, overridable as argv[1] ("YYYY-MM-DD").
    # `now`, `output`, and `http` are deliberately module-level globals read
    # by get_u/get_a/get_p.
    now = time.strftime("%Y-%m-%d", time.localtime())
    if len(sys.argv) > 1:
        now = sys.argv[1]
    output = "dytt_%s.csv" % now
    if not os.path.exists(output):
        # BUG FIX: the bare open().write() never closed its handle; use a
        # context manager to write the CSV header.
        with open(output, "w") as fp:
            fp.write("#name,#image_url,#description,#download_url\n")
    # Shared HTTP client used by get_p(); persists cookies to disk.
    http = util.HTTP(cookiefile="cookie.txt", cookiejar="cookie.txt",
                     timeout=20)
    get_a()
