import urllib
import re
import itertools
import string
import BeautifulSoup
from datetime import datetime
import time

def extract_text(t):
    """Recursively flatten a BeautifulSoup node list (or plain string) to text.

    Returns "" for None/empty input; returns strings unchanged; otherwise
    concatenates the extracted text of every child node.
    """
    if not t:
        return ""
    # type(u"") is `unicode` on Python 2 and `str` on Python 3, so this
    # accepts both string types without naming the Py2-only `unicode`.
    if isinstance(t, (type(u""), str)):
        return t
    return "".join(extract_text(c) for c in t)

def get(N, *PropertyCodeValues):
    res = []
    page = 1
    print "Start time: " + str(datetime.now())
    while True:
        print "Page", page
        data = {
            'Submit': 'Property',
            'N': str(N),
            'bop': 'And',
            'Pagesize': '100', #set to 22 or so for debugging, less items per page to scan
            'Order': 'RATING',
            'Page': str(page),
        }
### backup of default url settings. Note Newegg sorts by int(rating) then by number of ratings
##        data = {
##            'Submit': 'Property',
##            'N': str(N),
##            'bop': 'And',
##            'Pagesize': '100',
##            'Order': 'RATING',
##            'Page': str(page),
##        }
        data = data.items()
        for PropertyCodeValue in PropertyCodeValues:
            data.append(('PropertyCodeValue', PropertyCodeValue))
        url = 'http://www.newegg.com/Product/ProductList.aspx?'+urllib.urlencode(data)
        print url
        text = urllib.urlopen(url).read()
        dom = BeautifulSoup.BeautifulSoup(text)
        pager = dom.find('input', {'name':'Page'})
        if not pager:
            print "lost pager"
            break
        pager = int(pager['value'])
        if pager != page:
            print "lost pager2"
            break
        j = 0
        print "Have page, datetime: " + str(datetime.now())
        for item in dom.findAll('div', {'class':'itemCell'}):
            #print "-----------------------\n\n"
            dom_desc = item.find('span', {'class':'itemDescription'})
            title = dom_desc.contents[0]
            title += " " + extract_text(item.find('ul', {'class':'itemFeatures'}))
            title = title.strip()
            link = dom_desc.parent['href']
            #Scrape shipping costs, $/GB can really be changed by including shipping
            try:
                priceShip = float(item.find('li', {'class':'priceShip'}).text.split(' ',1)[0][1:])
            except:
                priceShip = 0.00
                
            ##Scrape detailed rating information
            item_info = [0,0,0,0,0,0,0.00,100*(page-1)+j,time.time()]  # 100*(page-1)+j will allow us to give a count beyond 99, first page 0-99, second page 100-199, etc

            if item.find('a', {'class':'itemRating'}): #avoid scraping if we don't need to, ie item has 0 reviews... Really saves a lot of time!
                print "d",
                data_d = urllib.urlopen(link).read()
                soup_d = BeautifulSoup.BeautifulSoup(data_d)
                table_d = soup_d.findAll(id=["reviewNumber5", "reviewNumber4", "reviewNumber3", "reviewNumber2", "reviewNumber1"])
               
                for row_d in table_d:
                    item_info [int(str(row_d['id'])[12:13])]= int(row_d.text.replace(',',''))
                item_info [0] = sum(item_info[1:6])
                for i in range(5):
                   item_info[6] = item_info[i+1]*(i+1) + item_info[6]
                if item_info[0] > 0:
                    item_info[6] = item_info[6]/item_info[0]
                    
            #print item_info
            print str(j)+" ", #the trailing comma removes the newline
            j += 1

            try:
                out = re.compile("[^0-9A-Za-z]([0-9.]+)([MGT])B").findall(title.upper())[0]
            except IndexError:
                print "invalid size", repr(title)
                continue
            size = float(out[0])
            if out[1] == 'M':
                size /= 1000.
            elif out[1] == 'T':
                size *= 1000.
            try:
                price = float(extract_text(item.find('li', {'class':'priceFinal'})).split('$')[1].replace(',',''))
            except:
                try:
                    price = float(extract_text(item.find('li', {'class':'priceList'})).split('$')[1].replace(',',''))
                except:
                    print "invalid price", repr(extract_text(item.find('li', {'class':'priceFinal'}))), repr(extract_text(item.find('li', {'class':'priceList'})))
                    continue
            #                   9,  10,   11,       12,   13,     14,         15
            item_info.extend([size,price,priceShip,title,link,size/price,size/(price+priceShip)])
            yield item_info #size, price, title, link, #maybe item_info should be a class? item_info[gbprice] is less confusing than item_info[13]
        print "Page", page, "done"
        #break #for debugging, truncate number of pages processed
        page += 1
