import chilkat
import MySQLdb
import string

m = MySQLdb
c = m.connect('HOST', 'USERNAME', 'PASSWORD', 'DBNAME')
db = c.cursor()
print db

#  The Chilkat Spider component/library is free.
spider = chilkat.CkSpider()

# Domains already crawled, and the queue of URLs still to visit.
# Both arrays reject duplicate entries.
seenDomains = chilkat.CkStringArray()
seedUrls = chilkat.CkStringArray()
for sa in (seenDomains, seedUrls):
    sa.put_Unique(True)

seedUrls.LoadFromFile('newspaperlinks.txt')

# Outbound-link exclude patterns: session-id URLs, personal /
# ISP-hosted home pages, and tilde user directories.
for pattern in ("*?id=*",
                "*.mypages.*",
                "*.personal.*",
                "*.comcast.*",
                "*.aol.*",
                "*~*"):
    spider.AddAvoidOutboundLinkPattern(pattern)

# Cache fetched pages so previously-seen URLs are not re-fetched.
spider.put_CacheDir("c:/spiderCache/")
spider.put_FetchFromCache(True)
spider.put_UpdateCache(True)

while seedUrls.get_Count() > 0 :

    url = seedUrls.pop()
    spider.Initialize(url)

    an = -1


    #  Spider 5 URLs of this domain.
    #  but first, save the base domain in seenDomains
    domain = spider.getUrlDomain(url)
    seenDomains.Append(spider.getBaseDomain(domain))

    for i in range(0,100):
        success = spider.CrawlNext()
        if (success != True):
            break

	a = []
        #  Display the URL we just crawled.
	ae=spider.lastUrl()
        a.append(string.split(string.upper(spider.lastHtmlTitle()), ' '))
        a.append(string.split(string.upper(spider.lastHtmlDescription()), ' '))
        a.append(string.split(string.upper(spider.lastHtmlKeywords()), ' '))


        sql = 'select urlid from url where url="'+ae+'"'
        print db.execute(sql)
    	cd = db.fetchone()
        if not cd:
	    sql = 'insert into url(url, modified) values("'+ae+'", now())'
	    print sql
	    print db.execute(sql)
	    sql = 'select count(url) from url'
	    print sql, db.execute(sql)
	    cd = db.fetchone()


	for x in a:
		for y in x:
			xy = y.translate(string.maketrans("",""), string.punctuation) 
			print url,xy
			sql = 'select id from terms where term="'+xy+'"'
			print sql, db.execute(sql)
			cd2 = db.fetchone()
			if not cd2:
				sql = 'insert into terms(term) values("'+xy+'")'

				print sql, db.execute(sql)
				sql = 'select count(term) from terms'
				print sql, db.execute(sql)
				cd2 = db.fetchone()
				
			if cd and cd2:
				sql = 'insert into term2url(termid, urlid) values('+str(cd2[0])+', '+str(cd[0])+')'
				print sql, db.execute(sql)			

	c.commit()

        #  If the last URL was retrieved from cache,
        #  we won't wait.  Otherwise we'll wait 1 second
        #  before fetching the next URL.
        if (spider.get_LastFromCache() != True):
            spider.SleepMs(1000)

    #  Add the outbound links to seedUrls, except
    #  for the domains we've already seen.
 #   for i in range(0,spider.get_NumOutboundLinks()):

#        url = spider.getOutboundLink(i)
 #       domain = spider.getUrlDomain(url)
  #      baseDomain = spider.getBaseDomain(domain)
   #     if (not seenDomains.Contains(baseDomain)): pass
#            seedUrls.Append(url)

        #  Don't let our list of seedUrls grow too large.
        if (seedUrls.get_Count() > 1000):
            break

db.close()




