#!/usr/bin/python
# -*- coding: utf-8 -*-

# import modules here
import sys
import urllib.request
import sqlite3 as sqlite
from bs4 import BeautifulSoup

# function definitions
def get_raw_data(sUrl, dProxies=None):
        """Fetch sUrl through the (optional) HTTP proxy and return the page's visible text.

        Parameters:
            sUrl     -- URL to fetch.
            dProxies -- optional proxy mapping, e.g. {'http': 'http://host:port/'}.
                        Defaults to the module-level sProxy mapping set up by the
                        __main__ block, preserving the original behaviour.

        Returns:
            The page content as plain text (all markup stripped by BeautifulSoup).
        """
        # Backward-compatible fallback to the global the original code relied on;
        # globals().get avoids a NameError when the module is imported rather
        # than run as a script.
        if dProxies is None:
                dProxies = globals().get('sProxy')

        # ProxyHandler/build_opener replace the legacy FancyURLopener, which is
        # deprecated since Python 3.3 (and removed in 3.14).
        aHandlers = [urllib.request.ProxyHandler(dProxies)] if dProxies else []
        oOpener = urllib.request.build_opener(*aHandlers)

        # 'with' guarantees the HTTP response is closed even if decoding fails.
        with oOpener.open(sUrl) as oResponse:
                sHtml = oResponse.read().decode('utf-8')

        # Explicit parser silences bs4's GuessedAtParserWarning and makes the
        # parse deterministic across environments.
        return BeautifulSoup(sHtml, 'html.parser').get_text()


# function definitions
def get_nav_data(sData):
    """Extract the NAV value from the raw page text returned by get_raw_data.

    The NAV figure is the third whitespace-separated token between the
    literal markers 'NAV' and 'Day Change' in the scraped text.

    Parameters:
        sData -- raw page text.

    Returns:
        The NAV value as a string.

    Raises:
        ValueError -- if either marker is missing or the text between the
                      markers does not contain at least three tokens.
    """
    # markers that delimit the NAV data in the raw text
    START_MARKER = 'NAV'
    END_MARKER = 'Day Change'

    iStart = sData.find(START_MARKER)
    iEnd = sData.find(END_MARKER)

    # str.find returns -1 when a marker is absent; without this guard the
    # slice below would silently produce garbage instead of failing.
    if iStart == -1 or iEnd == -1:
        raise ValueError('NAV markers not found in page text')

    sSlice = sData[iStart + len(START_MARKER):iEnd]

    # The page uses non-breaking spaces (U+00A0); normalise them to plain
    # spaces before splitting into tokens.
    aNavData = sSlice.replace(u'\xa0', u' ').split(' ')

    # Guard the positional access that used to raise a bare IndexError.
    if len(aNavData) < 3:
        raise ValueError('Unexpected NAV data layout between markers')

    # the NAV value is the third element in the token array
    return aNavData[2]

if __name__ == '__main__':

    # Corporate proxy all outbound HTTP requests are routed through.
    PROXY = "http://ukwebproxy.na.blkint.com:8080/"

    # Proxy mapping consumed (as a module global) by get_raw_data().
    sProxy = {'http': PROXY}

    # Raw string for the Windows path: the original non-raw literal only
    # worked because '\d', '\p' and '\s' happen not to be escape sequences.
    oDb = sqlite.connect(r'C:\dev\python\sqlite\data\datadump.sqlite')

    try:
        # 'with' on the connection commits on success / rolls back on error.
        with oDb:
            # Row factory must be set before the cursor is created so that
            # fetched rows support name-based access (oRecord["url"]).
            oDb.row_factory = sqlite.Row
            oDataSet = oDb.cursor()
            oDataSet.execute("SELECT * FROM url")
            oRecordSet = oDataSet.fetchall()

            # Scrape each stored URL and print its id alongside the NAV value.
            for oRecord in oRecordSet:
                sUrl = oRecord["url"]
                sData = get_raw_data(sUrl)
                sNavData = get_nav_data(sData)
                print(oRecord["id"], sNavData)
    finally:
        # The 'with' block manages the transaction, not the connection
        # lifetime; close the handle explicitly.
        oDb.close()
