#encoding=utf-8

__author__ = 'dragon'

import urllib2
import os
import pymongo
import time
import hashlib

def myspider(startweb, keyword):
    list = [startweb]
    curindex = 0
    Keyword = keyword

    #网络上MongoHQ
    #con = pymongo.Connection("paulo.mongohq.com", 10042)
    #db = con.mytest
    #db.authenticate("dragon", "dragon")
    #db.urllist.drop()

    #本地数据库
    con = pymongo.Connection("localhost", 27017)
    db = con.mytest

    while curindex < len(list):
        url = list[curindex]
        print "list count =", len(list), "  curcheck ", curindex
        print "try to visit ", url

        headers = ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.66 Safari/537.36')

        try:
            opener = urllib2.build_opener()
            opener.addheaders = [headers]
            openness = opener.open(url, None, 30)
            data = openness.read()
            opener.close()
        except:
            print "some error ..."
            curindex += 1
            continue

        print "finish get data..."

        os.remove("d:/test.txt")
        file = open("d:/test.txt", "a")
        print >> file, data
        file.close()

        myfile      = open("d:/test.txt", "r")
        mystring    = myfile.read()
        myfile.close()

        #找到标题
        title       = ""
        headstart   = mystring.find("<head>")
        headend     = mystring.find("</head>")
        if headstart < 0:
            headstart   = mystring.find("<HEAD>")
            headend     = mystring.find("</HEAD>")

        if headstart > 0:
            titlestart  = mystring.find("<title>")
            titleend    = mystring.find("</title>")
            if titlestart < 0:
                titlestart  = mystring.find("<TITLE>")
                titleend    = mystring.find("</TITLE>")

            if titleend > titlestart and titlestart < headend and titleend < headend:
                title = mystring[titlestart+len("<title>"):titleend]

        dbdata = {"title":"", "url":"", "time":""}

        try:
            title = title.decode("utf-8").encode("utf-8")
        except:
            try:
                title = title.decode("gbk").encode("utf-8")
            except:
                pass


        dbdata["title"] = title
        dbdata["url"] = url
        dbdata["time"] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        try:
            db.urllist.insert(dbdata)
        except:
            print "insert error"

        if len(mystring) > 0:
            while len(mystring) > 0:
                start = mystring.find("href=\"")
                if start <= 0:
                    break

                substring = mystring[start+6:]
                end = substring.find("\"")
                weblink = substring[:end]
                if Keyword != "":
                    if weblink.find(Keyword) >= 0 and list.count(weblink) <= 0:
                        list.append(weblink)
                elif 0 > weblink.find("video.sina.com.cn") \
                    and 0 > weblink.find("video.baidu.com") \
                    and 0 <= weblink.find("http:") \
                    and 0 >= list.count(weblink):

                    list.append(weblink)

                mystring = mystring[start+6:]

        curindex += 1

def mylogonweibo(qq, pwd):
    code=""
    userID=qq
    url='http://ptlogin2.qq.com/login'
    checkurl="http://check.ptlogin2.qq.com/check?uin="+userID
    headers = ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.66 Safari/537.36')
    response=urllib2.urlopen(checkurl)
    responsestr = response.read()
    code=responsestr.split(",")[1]
    print code
    print responsestr

    pwd1 = hashlib.md5(pwd).hexdigest().upper()
    pwd1 = hashlib.md5(pwd1).hexdigest().upper()

    pwd="648C9949C3EC10D601229FD10CAA9FC2"
    pwd=hashlib.md5(pwd1+code.upper()).hexdigest().upper()
    params=\
        {
        "ptlang":"2052",
        "u":userID,
        "p":pwd,
        "verifycode":code,
        "low_login_enable":'1',
        "low_login_hour":'720',
        "css":"http://imgcache.qq.com/ptcss/b4/wb/46000101/login1.css",
        "aid":'46000101',
        "mibao_css":"m_weibo",
        "u1":"http://t.qq.com",
        "ptredirect":"1",
        "h":"1",
        "from_ui":"1",
        "fp":"loginerroralert",
        "action":"19-35-1276379",
        "g":'1',
        "t":'3',
        "dummy":"",
        "js_type":'2',
        "js_ver":'10009',
        }

    try:
        opener = urllib2.build_opener()
        opener.addheaders = [headers]
        openness = opener.open(url, None, 30)
        data = openness.read()
        opener.close()
    except:
        print "some error ..."



if __name__ == '__main__':
    # Crawl hao123, queueing only links whose URL contains "hao123".
    myspider("http://www.hao123.com", "hao123")

    # Alternative crawl targets, kept for reference:
    #myspider("http://www.tudou.com/", "tudou")
    #myspider("http://wallpaper.pconline.com.cn/pic/22915.html", "")

    #mylogonweibo("360777830", "")