#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
###############################
# Author : free.won
# Email  : freefis@Gmail.com
# Licensed under the GPL License
###############################

"""There are many chunks of URLs on kissbit.org, so I collect them in a list
and built a multi-threaded crawler to fetch them, maximising CPU utilisation."""


import urllib2
import re
import threading
import urlparse

class daohang():
    """Crawls (url, name) link pairs from the assigned navigation sites.

    All methods return a list of 2-tuples ``(url, link_text)`` scraped
    with regexes from each site's HTML (Python 2 / urllib2).
    """

    def bitren(self):
        """Fetch (url, name) link pairs from www.bitren.com."""
        html = urllib2.urlopen("http://www.bitren.com").read()
        regex = r"\<a rel=\"external\" href=\"(.*)\s.*\>(.*)\<\/a\>"
        links = re.compile(regex).findall(html)
        return links

    def nfans(self):
        """Fetch (url, name) link pairs from nfans.org (http and ftp links)."""
        html = urllib2.urlopen("http://nfans.org").read()
        # Two patterns: one matches http:// anchors, the other ftp:// anchors.
        regex0 = r"href=\"(http.*)\"\s+.*\s+id.*\">(.*)</a><a>"
        regex1 = r"href=\"(ftp.*)\"\s+.*\s+id.*\">(.*)</a><a>="
        links = re.compile(regex0).findall(html)
        ftp_link = re.compile(regex1).findall(html)
        links += ftp_link
        return links

    def kissbit(self):
        """Fetch (url, name) link pairs from kissbit, crawling each of its
        sub-pages with one thread per page."""
        all_url = []
        BASE_URL = "http://kissbit.cn/"
        conn = urllib2.urlopen(BASE_URL)
        html = conn.read()
        conn.close()
        # The front page links to sub-pages via relative "./xxx" hrefs;
        # resolve them against BASE_URL before crawling.
        site_regex = r'href="(\.\/.*)"\s*target.*</li>'
        site_list = [urlparse.urljoin(BASE_URL, one)
                     for one in re.compile(site_regex).findall(html)]
        site_list.append(BASE_URL)

        class ParallelCrawler(threading.Thread):
            """Worker thread: fetches one page and appends its links to the
            shared all_url list (list.append is atomic under the GIL)."""

            def __init__(self, site):
                self.site = site
                threading.Thread.__init__(self)

            def run(self):
                conn = urllib2.urlopen(self.site)
                html = conn.read()
                conn.close()

                url_regex = r'<a href="(.*)"\s*.*\s*>(.*)</a></li>'
                l = re.compile(url_regex).findall(html)
                del html  # drop the page body before accumulating results
                for one in l:
                    # Skip decorative anchors carrying inline color styles.
                    if "style=\"color" not in one[0]:
                        all_url.append(one)

        crawlers = [ParallelCrawler(one) for one in site_list]
        # BUG FIX: the original started and joined each thread inside the
        # same loop, which ran the crawlers strictly one after another.
        # Start them all first, then wait for all, so fetches overlap.
        for crawler in crawlers:
            crawler.start()
        for crawler in crawlers:
            crawler.join()
        return all_url


# Expose the crawlers of one shared daohang instance as module-level
# functions, so callers can use bitren()/nfans()/kissbit() directly.
_p = daohang()
bitren, nfans, kissbit = _p.bitren, _p.nfans, _p.kissbit


def update_page():
	from mysite.daohang.models import Site
	from django.utils.encoding import force_unicode
	url_set = bitren() + nfans() + kissbit()
	for one in url_set:
		s=Site(url=one[0],name=force_unicode(one[1]))
		s.save()
	print "ok"
