import opie
import chilkat
import re

# Bootstrap the opie persistence layer: connect to the database, then
# create and log in the user whose objects/links will hold crawl results.
o = opie.object_factory()
# NOTE(review): placeholder DB connection values — replace before running.
o.create_db('HOST', 'USERNAME', 'PASSWORD', 'DBNAME')

u = opie.user_manager()
# SECURITY NOTE(review): hard-coded user credentials; presumably for local
# testing only — confirm these should not be checked in.
uu = u.create_user('jack', 'saddle', 'email@gmail.com')
uu.login('jack', 'saddle')

# Manager used later to link stored email/keyword objects to crawled URLs.
l = opie.link_manager()

class EmailScraper:
    """Extract email addresses from HTML/plain-text content.

    The matches from the most recent ``email()`` or ``collectEmail()``
    call are kept in ``self.emails``.
    """

    # Loose address pattern; best effort — it can still miss unusual
    # addresses (example match: t.s@d.com).  Bug fixes vs. the original:
    # raw string, and the dot before the TLD is escaped (the unescaped
    # '.' matched ANY character).  Compiled once at class level instead
    # of on every call.
    _EMAIL_RE = re.compile(r"[-a-zA-Z0-9._]+@[-a-zA-Z0-9_]+\.[a-zA-Z0-9_.]+")
    # Matches addresses inside mailto: anchors, case-insensitively.
    # Example match: <a href="mailto:t.s@d.com">
    _MAILTO_RE = re.compile(r"<a\s+href=\"mailto:([a-zA-Z0-9._@]*)\">",
                            re.IGNORECASE)

    def __init__(self):
        # Addresses found by the most recent scan.
        self.emails = []

    def reset(self):
        """Forget any previously collected addresses."""
        self.emails = []

    def email(self, htmlSource):
        """Collect all plausible email addresses from *htmlSource*.

        Stores the matches in ``self.emails`` and returns them (bug fix:
        the original returned None even though callers print/iterate
        the result).
        """
        self.emails = self._EMAIL_RE.findall(htmlSource)
        return self.emails

    def collectEmail(self, htmlSource):
        """Collect all addresses that appear in mailto: links in the
        HTML source string.  Stores them in ``self.emails`` and
        returns them.
        """
        self.emails = self._MAILTO_RE.findall(htmlSource)
        return self.emails

#  The Chilkat Spider component/library is free.
spider = chilkat.CkSpider()
hana = EmailScraper()

# Base domains already crawled, and the frontier of URLs still to visit.
seenDomains = chilkat.CkStringArray()
seedUrls = chilkat.CkStringArray()

# Reject duplicate entries on Append so domains/URLs are queued once.
seenDomains.put_Unique(True)
seedUrls.put_Unique(True)

# NOTE(review): empty seed URL — presumably a placeholder; the crawl
# does nothing useful until a real start URL is supplied here.
seedUrls.Append("")

#  Set our outbound URL exclude patterns
#  (skip session-id links and common personal-homepage hosts).
spider.AddAvoidOutboundLinkPattern("*?id=*")
spider.AddAvoidOutboundLinkPattern("*.mypages.*")
spider.AddAvoidOutboundLinkPattern("*.personal.*")
spider.AddAvoidOutboundLinkPattern("*.comcast.*")
spider.AddAvoidOutboundLinkPattern("*.aol.*")
spider.AddAvoidOutboundLinkPattern("*~*")

#  Use a cache so we don't have to re-fetch URLs previously fetched.
# NOTE(review): Windows-style cache path — adjust for other platforms.
spider.put_CacheDir("c:/spiderCache/")
spider.put_FetchFromCache(True)
spider.put_UpdateCache(True)

# Breadth-first crawl: pop a seed URL, crawl up to 5 pages of its domain,
# persist the scraped emails/keywords through opie, then enqueue outbound
# links whose base domains we have not seen yet.
#
# Bug fixes vs. the original: the whole "COREVET" section was dedented out
# of both the for-loop and the while-loop (a syntax error), the second
# for-loop was missing its colon, and the email object was stored as its
# own attribute value instead of the collected address list.
while seedUrls.get_Count() > 0:

    url = seedUrls.pop()
    spider.Initialize(url)

    # Record this base domain first so its own outbound links are not
    # re-queued below.
    domain = spider.getUrlDomain(url)
    seenDomains.Append(spider.getBaseDomain(domain))

    #  Spider at most 5 URLs of this domain.
    for i in range(0, 5):
        success = spider.CrawlNext()
        if success != True:
            break

        html = spider.lastHtml()

        #  Display the URL we just crawled and what was scraped from it.
        print(spider.lastUrl())
        print(hana.email(html))
        print(hana.collectEmail(html))
        print(spider.lastHtmlKeywords())

        #  COREVET functions: persist the scraped data for this page.
        email_obj = o.create_obj(uu)
        hana.email(html)
        ab = list(hana.emails)          # plain-text address matches
        for y in hana.collectEmail(html):
            ab.append(y)                # mailto: anchor matches
        email_obj.set_attr('emails', ab)

        keyword = o.create_obj(uu)
        data = []
        for z in spider.lastHtmlKeywords():
            data.append(z)
        keyword.set_attr('keywords', data)

        # Link the stored email/keyword objects to the crawled URL.
        ll = l.create_link(spider.lastUrl())
        ll.set_attr(str(keyword.id), 'keywords')
        ll.set_attr(str(email_obj.id), 'emails')

        #  If the last URL was retrieved from cache, we won't wait.
        #  Otherwise wait 1 second before fetching the next URL.
        if spider.get_LastFromCache() != True:
            spider.SleepMs(1000)

    #  Add the outbound links to seedUrls, except for the domains
    #  we've already seen.
    for i in range(0, spider.get_NumOutboundLinks()):

        url = spider.getOutboundLink(i)
        domain = spider.getUrlDomain(url)
        baseDomain = spider.getBaseDomain(domain)
        if not seenDomains.Contains(baseDomain):
            seedUrls.Append(url)

        #  Don't let our list of seedUrls grow too large.
        if seedUrls.get_Count() > 1000:
            break
