'''
Generator of random linked sites structures.
(c) 2007 Gregory Petrosyan
'''

import random as r
import itertools as it
import db_classes as dbc


# One monotonically increasing primary-key generator per table.
site_ids, page_ids, link_ids, kwd_ids = (it.count(1) for _ in range(4))

# Short local aliases for the RNG helpers used throughout this module.
gauss, sample, choice = r.gauss, r.sample, r.choice

def rand(x):
    """Return a non-negative int drawn from N(x, x/3), folded at zero."""
    sigma = x / 3.0
    return int(abs(gauss(x, sigma)))

# Optional psyco JIT acceleration for the hot rand() helper; its absence
# is non-fatal.  print(...) is used instead of the Python-2-only print
# statement so the module stays portable (identical output on Py2/Py3).
try:
    import psyco
    psyco.bind(rand)
except ImportError:
    print('warning: can\'t use psyco')


def rand_site():
    """Create a dbc.site row with a fresh id, random URL and random TCI.

    The TCI (citation index) is a random multiple of 100 in [0, 100000].
    Uses the portable next() builtin instead of the Py2-only .next().
    """
    site_id = next(site_ids)
    tci = r.randint(0, 1000) * 100
    return dbc.site(site_id, rand_site_url(site_id), tci)

def rand_site_url(i):
    """Build a unique fake site URL such as 'http://www.blog-7.spb.ru'."""
    site_names = ('yandex', 'slashdot', 'amazon', 'lenta', 'example',
                  'homepage', 'blog', 'sport', 'google')
    domains = ('.org', '.com', '.ru', '.ua', '.spb.ru', '.msk.ru', '.co.uk')
    return 'http://www.%s-%s%s' % (choice(site_names), i, choice(domains))


def rand_pages(site, n_avg, depth_avg):
    """Return a non-empty list of random pages for *site*.

    The page count is rand(n_avg) + 1, so every site gets at least one
    page.  range() replaces the Py2-only xrange() (identical behavior
    here, portable to Python 3).
    """
    n_pages = rand(n_avg) + 1
    return [rand_page(site, depth_avg) for _ in range(n_pages)]

def rand_page(site, depth_avg):
    """Create a dbc.page row belonging to *site*.

    The page rank is loosely derived from the site's TCI and capped at
    10.  Renames the local `id` (shadowed the builtin) and uses the
    portable next() builtin; `//` pins the Py2 integer-division
    semantics of the original `site.tci / 10000`.
    """
    page_id = next(page_ids)
    depth = rand(depth_avg)
    url = rand_page_url(page_id, site.url, depth)
    # tci is a multiple of 100 in [0, 100000], so tci // 10000 is in [0, 10].
    pr = min(rand(site.tci // 10000), 10)
    text = 'some text here'
    return dbc.page(page_id, site.site_id, url, depth, pr, text)

def rand_page_url(id, site_url, depth):
    """Build a page URL under *site_url* (*depth* is accepted but unused)."""
    dirs = ('news', 'misk', 'articles', 'data', 'files', 'people')
    # Extension is drawn first, then the directory, matching the
    # original evaluation order.
    ext = choice(('.htm', '.html', '.xml', '.xhtml'))
    return '/'.join((site_url, choice(dirs), str(id) + ext))


def rand_links(pages, ndifflinks):
    """Create roughly *ndifflinks* dbc.link rows pointing at random pages.

    *pages* is a list with one entry per site, each entry a list of page
    rows; rand_pages() guarantees every site has at least one page.

    Fixes a crash in the original: sample() raised ValueError whenever
    the random batch size exceeded the chosen site's page count, so the
    batch is now clamped to the site size.
    """
    nsites = len(pages)
    links = []
    std_title = 'Visit this cool page!'
    std_text = 'click here to go to %s'
    while ndifflinks > 0:
        site_pages = choice(pages)
        # Clamp so sample() never asks for more pages than exist in the
        # chosen site (ValueError otherwise).  `//` keeps the original
        # Py2 integer-division semantics.
        batch = min(rand(ndifflinks // nsites) + 1, len(site_pages))
        for page in r.sample(site_pages, batch):
            links.append(dbc.link(next(link_ids), page.page_id,
                                  std_title, std_text % page.url))
        ndifflinks -= batch
    return links


def rand_kwd(i, popularity_avg=10):
    """Create a dbc.keyword row named 'kwd_<i>' with random popularity."""
    name = 'kwd_%s' % i
    return dbc.keyword(next(kwd_ids), name, rand(popularity_avg))


_p2l = dbc.page_to_link  # module-level alias: avoids the attribute lookup in the loop
def rand_pages_to_links(pages, links, nlinks_avg):
    """Attach *nlinks_avg* random links to every page.

    Returns dbc.page_to_link rows; the third field is rand(nlinks_avg)
    (presumably a weight/count -- confirm against db_classes).

    Fixes a crash in the original: sample() raised ValueError when
    nlinks_avg exceeded the number of available links.
    """
    ps_to_ls = []
    # Hoisted out of the loop: same value for every page.
    per_page = min(nlinks_avg, len(links))
    for site_pages in pages:
        for page in site_pages:
            pid = page.page_id
            for link in sample(links, per_page):
                ps_to_ls.append(_p2l(pid, link.link_id, rand(nlinks_avg)))
    return ps_to_ls


_p2k = dbc.page_to_keyword  # module-level alias: avoids the attribute lookup in the loop
def rand_pages_to_kwds(pages, kwds, nkwds_avg, hot_kwd, relevance_avg=10):
    """Attach about *nkwds_avg* random keywords to every page.

    If *hot_kwd* is not None, it replaces the last sampled keyword of
    every page that received any keywords, so the hot keyword ends up on
    (almost) every page.

    Fixes a crash in the original: sample() raised ValueError whenever
    rand(nkwds_avg) exceeded the keyword pool size, so the sample size
    is now clamped.
    """
    ps_to_kwds = []
    pool_size = len(kwds)
    for site_pages in pages:
        for page in site_pages:
            page_id = page.page_id
            # Clamp so sample() never exceeds the keyword pool.
            pkwds = sample(kwds, min(rand(nkwds_avg), pool_size))
            if hot_kwd and pkwds:
                pkwds[-1] = hot_kwd
            for kwd in pkwds:
                ps_to_kwds.append(_p2k(page_id, kwd.keyword_id,
                                       rand(relevance_avg)))
    return ps_to_kwds


_l2k = dbc.link_to_keyword  # module-level alias: avoids the attribute lookup in the loop
def rand_links_to_kwds(links, kwds, nkwdsl_avg, relevance_avg=10):
    """Attach about *nkwdsl_avg* random keywords to every link.

    Returns dbc.link_to_keyword rows with random relevance around
    *relevance_avg*.

    Fixes a crash in the original: sample() raised ValueError whenever
    rand(nkwdsl_avg) exceeded the keyword pool size, so the sample size
    is now clamped.
    """
    ls_to_kwds = []
    pool_size = len(kwds)
    for link in links:
        link_id = link.link_id
        for kwd in sample(kwds, min(rand(nkwdsl_avg), pool_size)):
            ls_to_kwds.append(_l2k(link_id, kwd.keyword_id,
                                   rand(relevance_avg)))
    return ls_to_kwds


def generate(nsites,
             npages_avg, pdepth_avg,
             ndifflinks, nlinks_avg,
             ndiffkwds, nkwds_avg, nkwdsl_avg,
             uniform=True):
    '''
    Yield a complete random dataset: sites, pages, links, keywords and
    the relation rows that connect them.

    nsites -- number of sites in base
    npages_avg -- average number of pages per sites
    pdepth_avg -- average page depth
    ndifflinks -- number of different links in base
    nlinks_avg -- average number of links per page
    ndiffkwds  -- number of different keywords in base
    nkwds_avg  -- average number of keywords per page
    nkwdsl_avg -- average number of keywords per link

    When uniform is False, a special 'hot' keyword is yielded first and
    forced onto (almost) every page; when True it is simply mixed into
    the regular keyword pool.  Uses range()/next() instead of the
    Py2-only xrange()/.next() (identical behavior, Python-3 portable).
    '''
    sites = [rand_site() for _ in range(nsites)]
    pages = [rand_pages(s, npages_avg, pdepth_avg) for s in sites]
    links = rand_links(pages, ndifflinks)
    kwds  = [rand_kwd(i) for i in range(ndiffkwds)]
    pages_to_links = rand_pages_to_links(pages, links, nlinks_avg)
    links_to_kwds = rand_links_to_kwds(links, kwds, nkwdsl_avg)

    hot_kwd = dbc.keyword(next(kwd_ids), 'hot_kwd')
    if uniform:
        kwds.append(hot_kwd)
        hot_kwd = None
    else:
        yield hot_kwd
    pages_to_kwds = rand_pages_to_kwds(pages, kwds, nkwds_avg, hot_kwd)

    # Pages first (site by site), then everything else -- the yield
    # order of the original is preserved exactly.
    for site_pages in pages:
        for page in site_pages:
            yield page
    for row in it.chain(sites, links, kwds, pages_to_links,
                        pages_to_kwds, links_to_kwds):
        yield row

