#!/usr/bin/env python3

"""
bandcamp-archive : bca.py

Download streaming quality mp3s from bandcamp.com
and save them in an organized way.

Name Your Price and Free high quality downloads
to be supported later.

FYI: in python2 the default encoding is ascii, in python3 it's unicode.
The HTML of bandcamp pages is unicode-encoded. We are discouraged
from simply switching python2's default encoding to unicode
because other parts of the code (modules/libraries) may break.


"""

#Only for use in Python 2.6.0a2 and later
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals

import os
import sys

#print(sys.version.split()[0])
if sys.version_info < (3,):
    import ConfigParser as configparser
    from urlparse import urlparse
    from urllib import urlopen
    import string
else:
    import configparser
    from urllib.parse import urlparse
    from urllib.request import urlopen
    
#try: #for python3
#    from urllib.request import urlopen
#    from urllib.parse import urlparse
#except ImportError: # for python2
#    from urlparse import urlparse
#    from urllib import urlopen



import argparse
#import urllib.request

import codecs
import textwrap
import subprocess
import poplib


#import Tkinter as Tk

# https://pypi.python.org/pypi/colorama

####from colorama import init
####init()
####from colorama import Fore, Back, Style

#print(Fore.RED + 'some red text')
#print(Back.GREEN + 'and with a green background')
#print(Style.DIM + 'and in dim text')
#print(Style.RESET_ALL)
#print('back to normal now')

#exit(0)

# TODO: check free space of dldir (abandoned)
# TODO: check to make sure dldir is writeable (maybe unnecessary)
# TODO: create functions that work for python 2.7 or 3.4




# User-facing notices, printed depending on how the album is listed.

# Shown when the album uses bandcamp's "name your price" model.
NYP_MESSAGE = '''
 This album is listed under the "name your price" model, meaning you
 could get higher quality if you manually go to this page, click buy
 and put 0 for the price. It is highly encouraged to support artists
 that allow you to download higher quality for free, since this is
 the correct model of file sharing and they should be rewarded.

'''

# Shown when the album is offered as a free download.
FREE_DL_MESSAGE = '''
This album is listed as a free download, a later version will retrieve
a higher quality version. for now we get the streaming quality. dont
forget to support artists who intentionally release their works for
free, as they have the best philosophy about sharing.
'''

# Shown for regular paid albums.
NOTNYP_MESSAGE = 'Please consider supporting this artist'

# Visual divider used between sections of console output.
SEPARATOR = '\n ================================================ \n'


# ---- FUNCTIONS ---- #

def initialize_cfg_dict():
    """
    Build and return the default configuration dictionary.

    cfg_dict is the single source for all configuration options; the
    config file and command-line arguments may override these defaults
    later.
    """
    return {
        'dl_dir': "downloads",
        'cfg_file': "bca.cfg",
        'gui_mode': False,
        'nyp_email': 'example@example.org',
        'nyp_zipcode': '00000',
        'nyp_email_password': 'examplepassword',
        'nyp_email_pop_server': 'pop3.openmailbox.org',
        'nyp_email_pop_server_port': '995',
        'email_dir': 'email',
        'save_config': False,
    }

def setup_argument_parser(parser_dict):
    """
    Define all command-line arguments and collect the parsed values.

    Returns a fresh dict of values that should override the defaults
    from initialize_cfg_dict(); optional settings only appear in the
    result when actually given on the command line.

    NOTE(review): the incoming parser_dict argument is discarded and a
    new dict is returned — confirm callers do not expect merging.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config',
                        dest='config_file',
                        action='store',
                        help='specify an alternate config file')
    parser.add_argument('-D', '--dl-dir',
                        dest='dl_dir',
                        action='store',
                        help='specify where to save stuff')
    parser.add_argument('-S', '--save-config',
                        dest='save_config',
                        action='store_true',
                        help='save current configuration')
    parser.add_argument('-v', '--verbose',
                        dest='verbose',
                        action='store_true',
                        help='show extra info')
    parser.add_argument('--allow-http',
                        dest='allow_http',
                        action='store_true',
                        help='allow HTTP connections')
    parser.add_argument('-g', '--gui',
                        dest='gui_mode',
                        action='store_true',
                        help='use the tk gui (not ready yet)')
    parser.add_argument('input_url', nargs='?')
    parser.set_defaults(ignore_errors=False)
    parser.set_defaults(allow_http=False)
    parser.set_defaults(save_config=False)
    args = parser.parse_args()
    # these three are always carried over, even when falsy
    result = {
        'verbose': args.verbose,
        'allow_http': args.allow_http,
        'input_url': args.input_url,
    }
    # the rest only override the defaults when explicitly set
    for key in ('gui_mode', 'config_file', 'dl_dir', 'save_config'):
        value = getattr(args, key)
        if value:
            result[key] = value
    return result


def tk_gui():
    """
    Experimental Tk GUI (not wired up yet).

    Builds a bare window with a URL entry, a checkbutton and a button,
    runs the Tk main loop, then terminates the program.
    """
    # in trisquel do # apt-get install python3-tk
    print('tk_gui')
    import tkinter
    root = tkinter.Tk()
    root.title('bandcamp-archive')
    mainframe = tkinter.Frame(root, padx=3, pady=3)

    # BUG FIX: StringVar/IntVar were referenced as bare names, which
    # raised NameError — they live in the tkinter module.
    url = tkinter.StringVar()
    examplecheck1 = tkinter.IntVar()
    feet_entry = tkinter.Entry(mainframe, width=7, textvariable=url)

    tkinter.Checkbutton(root, text="examplecheck1", variable=examplecheck1)
    tkinter.Button(root, text="Hello World").grid()
    # Code to add widgets will go here...
    root.mainloop()
    sys.exit(0)

def write_config_file(wcf_dict):
    """
    Save the current configuration to the file named by
    wcf_dict['cfg_file'], report success, and terminate the program
    so it can be re-run without the overriding arguments.

    Raises SystemExit(0) always (on success).
    """
    config = configparser.ConfigParser()
    config['bca-main'] = {}
    # persist only the keys that belong in the config file (runtime-only
    # flags like verbose/save_config are intentionally excluded)
    for key in ('dl_dir', 'nyp_email', 'nyp_zipcode', 'nyp_email_password',
                'nyp_email_pop_server', 'nyp_email_pop_server_port',
                'email_dir'):
        config['bca-main'][key] = wcf_dict[key]
    # the with-statement closes the file; the explicit close() that used
    # to follow it was redundant
    with open(wcf_dict['cfg_file'], 'w') as configfile:
        config.write(configfile)
    print("config file saved")
    print("now re-run without over-riding arguements")
    sys.exit(0)

def read_config_file(rcf_dict):
    """
    Load settings from the config file named by rcf_dict['cfg_file']
    into rcf_dict and return it.
    """
    config = configparser.ConfigParser()
    config.read(rcf_dict['cfg_file'])
    section = config['bca-main']
    for key in ('dl_dir', 'nyp_email', 'nyp_zipcode', 'nyp_email_password',
                'nyp_email_pop_server', 'nyp_email_pop_server_port',
                'email_dir'):
        rcf_dict[key] = section[key]
    return rcf_dict


def make_dl_dir(cfg_dict):
    """
    Create the root download directory if it does not exist yet.
    """
    dl_dir = cfg_dict['dl_dir']
    if os.path.isdir(dl_dir):
        return
    if cfg_dict['verbose']:
        print('creating base_download_dir:\n' + dl_dir)
    os.makedirs(dl_dir)
    # TODO: test what happens if dl_dir is set to a non-writable location
    # TODO: test if parent folders are created

def parse_input_url(cfg_dict):
    """
    break up input_url into smaller pieces

    Derives the protocol, sub-domain, full domain and url path from
    cfg_dict['input_url'] and returns a new url_dict containing them,
    plus the per-poster download dir and the temp-html save path, plus a
    normalized form of the input url.

    Exits with status 99 when the url has no forward slashes at all.

    NOTE(review): reads cfg_dict['dl_dir_full'], which is not set by
    initialize_cfg_dict() — presumably the caller adds it; confirm.
    """
    protocol_in_url = False
    protocol = 'https'
    # https is forced unless --allow-http was given AND the url
    # explicitly starts with http
    if "://" in cfg_dict['input_url']:
        protocol_in_url = True
        if cfg_dict['allow_http'] and cfg_dict['input_url'].split(':')[0] == 'http':
            if cfg_dict['verbose']:
                print("protocol was specifically declared as http")
            protocol = 'http'
    #TODO: accept non bandcamp urls, check for validity later
    if protocol_in_url:
        # e.g. 'https://artist.bandcamp.com/...' -> 'artist'
        sub_domain = cfg_dict['input_url'].split('.')[0].rsplit('/', 1)[1]
    else:
        sub_domain = cfg_dict['input_url'].split('.')[0]
    if sub_domain == 'www':
        # skip a leading 'www.' and take the next domain label instead
        sub_domain = cfg_dict['input_url'].split('.')[1]
    if '.bandcamp.com' in cfg_dict['input_url']:
        full_domain = '{}.bandcamp.com'.format(sub_domain)
        url_path = cfg_dict['input_url'].split('.bandcamp.com/', 1)[1]
    elif '/' not in cfg_dict['input_url']:
        print("input_url does not have any forward slashes")
        sys.exit(99)
    elif protocol_in_url:
        # custom domain with protocol: domain is the third '/'-field
        full_domain = cfg_dict['input_url'].split('/')[2]
        url_path = cfg_dict['input_url'].split('/', 3)[3]
    else:
        # custom domain without protocol: domain is the first '/'-field
        full_domain = cfg_dict['input_url'].split('/')[0]
        url_path = cfg_dict['input_url'].split('/', 1)[1]
    poster_dl_dir = '{}/{}'.format(cfg_dict['dl_dir_full'], full_domain)
    html_savefile = '{}/{}-tmp.html'.format(poster_dl_dir, sub_domain)
    url_dict = {}
    url_dict['protocol'] = protocol
    url_dict['sub_domain'] = sub_domain
    url_dict['full_domain'] = full_domain
    url_dict['dl_location_poster_base'] = poster_dl_dir
    url_dict['first_page_html_savefile'] = html_savefile
    url_dict['url_path'] = url_path
    # canonical, normalized form of the input url
    url_dict['input_url'] = protocol + '://' + full_domain + '/' + url_path
    return url_dict


def make_base_dir(cfg_dict):
    """
    Create the uploader (poster) dir inside the root download dir.

    Exits with status 99 if the dir still does not exist after the
    creation attempt.
    """
    dl_location_poster_base = cfg_dict['dl_location_poster_base']
    if os.path.isdir(dl_location_poster_base):
        if cfg_dict['verbose']:
            print('dl_location_poster_base: {} already exists'.format(dl_location_poster_base))
    elif not os.path.isfile(dl_location_poster_base):
        print('creating dl_location_poster_base: {}'.format(dl_location_poster_base))
        try:
            os.makedirs(dl_location_poster_base)
        except OSError:
            # BUG FIX: this was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; only filesystem errors (e.g.
            # the dir appearing between the check and the call) belong here
            if cfg_dict['verbose']:
                print("dir already exists: " + dl_location_poster_base)
        if not os.path.isdir(dl_location_poster_base):
            print('dl_location_poster_base {} still does not exist...'.format(dl_location_poster_base))
            sys.exit(99)
    return


def make_content_dir(cfg_dict):
    """
    Placeholder: will create the dir named after "artist - title" for an
    album or track inside the base (index) dir, which lives inside the
    root dl_dir.  Currently a no-op.
    """
    return None


def save_first_page(cfg_dict):
    """
    Fetch the page named by cfg_dict['input_url'] and store its HTML
    at cfg_dict['first_page_html_savefile'] via downloader().
    """
    downloader(cfg_dict['input_url'], cfg_dict['first_page_html_savefile'])
    return None


def detectnyp(html):
    """
    Return True if the album page HTML indicates the "name your price"
    purchase model, False otherwise.
    """
    # NYP pages carry a label like:
    # '<span class="buyItemExtra buyItemNyp secondaryText">&nbsp;name your price</span>'
    # the plain phrase is enough to detect it (the previous unused
    # nyp_clue local has been removed)
    return 'name your price' in html


def detect_free_download(first_page_html):
    """
    Return True if the album page offers a "Free Download" button,
    False otherwise.
    """
    fd_clue = 'type="button">Free Download</button>'
    return fd_clue in first_page_html


def determine_page_type(first_page_html, cfg_dict):
    """
    Classify the page by its og:type meta tag.

    Returns 'album', 'song', or 'index' (for a band page); exits with
    status 99 when no known tag is found.
    """
    # checked in this order: album, then song, then band
    type_markers = (
        ('<meta property="og:type" content="album">', 'album'),
        ('<meta property="og:type" content="song">', 'song'),
        ('<meta property="og:type" content="band">', 'index'),
    )
    for marker, page_type in type_markers:
        if marker in first_page_html:
            return page_type
    # BUG FIX: use sys.exit instead of the interactive-only exit(),
    # and fix the typo in the message
    print("could not determine page_type    :-( ")
    sys.exit(99)


def determine_digital_price(first_page_html, cfg_dict):
    """
    find the price and currency

    Extracts the digital purchase price and its currency label from the
    page HTML and returns them joined as one string.  Prints a notice
    and returns None when the digital-purchase markup is absent.
    Assumes the page is NOT listed as "name your price".
    """
    # we assume it isnt "name your price"
    find_digital_price_first_clue = '<span class="buyItemPackageTitle primaryText">Digital'

    if not find_digital_price_first_clue in first_page_html:
        print("could not determine price, probably unbuyable from bandcamp page")
        return

    # successively narrow the HTML: everything after the "Digital"
    # package title, then the contents of the base-text-color span,
    # up to its closing </span>
    split_html_digital_price_first_list = first_page_html.split(find_digital_price_first_clue, 1)
    split_html_digital_price_first = split_html_digital_price_first_list[1]
    find_digital_price_second_clue = '<span class="base-text-color">'
    split_html_digital_price_second_list = split_html_digital_price_first.split(find_digital_price_second_clue, 1)
    split_html_digital_price_second = split_html_digital_price_second_list[1]
    find_digital_price_third_clue = '</span>'
    split_html_digital_price_third_list = split_html_digital_price_second.split(find_digital_price_third_clue, 1)
    split_html_digital_price_third = split_html_digital_price_third_list[0]
    # the price text is taken from after the last ';' — presumably to
    # strip an HTML entity like '&#36;' preceding the number; TODO confirm
    find_digital_price_fourth_clue = ';'
    if cfg_dict['verbose']:
        # dump every intermediate value for debugging the scraping
        print("find_digital_price_first_clue = {}".format(find_digital_price_first_clue))
        print("split_html_digital_price_first_list = {}".format(split_html_digital_price_first_list))
        print("split_html_digital_price_first = {}".format(split_html_digital_price_first))
        print("find_digital_price_second_clue = {}".format(find_digital_price_second_clue))
        print("split_html_digital_price_second_list = {}".format(split_html_digital_price_second_list))
        print("split_html_digital_price_second = {}".format(split_html_digital_price_second))
        print("find_digital_price_third_clue = {}".format(find_digital_price_third_clue))
        print("split_html_digital_price_third_list = {}".format(split_html_digital_price_third_list))
        print("split_html_digital_price_third = {}".format(split_html_digital_price_third))
        print("find_digital_price_fourth_clue = {}".format(find_digital_price_fourth_clue))
    digital_price = split_html_digital_price_third.rsplit(find_digital_price_fourth_clue, 1)[1]
    # it is unknown if secondary merchandise could be a different currency
    #    so instead of a find_currency() function we do it here.
    find_digital_price_currency_clue = '<span class="buyItemExtra secondaryText">'
    find_digital_price_currency_clue_first_list = split_html_digital_price_first.split(find_digital_price_currency_clue, 1)
    find_digital_price_currency_clue_first = find_digital_price_currency_clue_first_list[1]
    digital_price_currency = find_digital_price_currency_clue_first.split(find_digital_price_third_clue, 1)[0]
    final_digital_price_string = digital_price + " " + digital_price_currency
    return final_digital_price_string


def replace_illegal_characters(input_string):
    """ 
    these are the characters that have filesystem issues,
    so we will avoid using them
    """ 
    #TODO: find a python 2/3 compatible way without having to check version
    illegals = ['<', '>', ':', '"', '/', '\\', '|', '?', '*']
    for char in illegals:
        if sys.version_info < (3,):
            input_string = input_string.translate(string.maketrans(char, '_'))
        else:
            input_string = input_string.translate(str.maketrans(char, '_'))
    return input_string


def determine_url_title(album_url):
    """
    Return the album/track slug — the last path component of the url.
    A single trailing slash is handled by falling back one component.
    """
    tail = album_url.rsplit('/', 1)[1]
    if tail:
        return tail
    return album_url.rsplit('/', 2)[1]


def find_artist_name(first_page_html):
    """
    Extract the artist name from the page's inline 'artist: "..."'
    javascript and sanitize it for use in file names.
    """
    after_marker = first_page_html.split('artist: ', 1)[1]
    artist_name = after_marker.split('"')[1]
    # illegal characters for windows are < > : " / \ | ? *
    return replace_illegal_characters(artist_name)


def determine_player_type(first_page_html):
    """
    Return the page's inline 'item_type: "..."' value, expected to be
    'album' or 'track'.
    """
    after_marker = first_page_html.split('item_type: ', 1)[1]
    item_type_value = after_marker.split('"')[1]
    # TODO: if item_type_value is not album or track then exit
    return item_type_value


def find_album_name(first_page_html):
    """
    Extract the album title from the page's inline 'album_title: "..."'
    javascript and sanitize it for use in file names.
    """
    after_marker = first_page_html.split('album_title: ', 1)[1]
    title_name = after_marker.split('"')[1]
    return replace_illegal_characters(title_name)


def find_track_name(first_page_html):
    """
    Extract the track title from the page's inline 'title :' javascript
    and sanitize it for use in file names.

    NOTE(review): the marker here is 'title :' (space before the colon),
    unlike the 'album_title: ' marker — presumably matching the track
    page markup; verify against a live track page.
    """
    after_marker = first_page_html.split('title :', 1)[1]
    title_name = after_marker.split('"')[1]
    return replace_illegal_characters(title_name)


def gen_pic_list(first_page_html):
    """
    Collect the urls of all pop-up album pictures on the page and
    return them as a list (printing how many were found).
    """
    # everything after each '<a class="popupImage' anchor; the url is
    # the second quoted field of each segment
    segments = first_page_html.split('<a class="popupImage')[1:]
    pic_url_list = [segment.split('"')[2] for segment in segments]
    print('found {} pictures to download'.format(len(segments)))
    return pic_url_list


def download_pics(pic_url_list, full_folder_path):
    """
    Download every picture url in pic_url_list into full_folder_path,
    naming each file after the last path component of its url.
    """
    #TODO: get file size via header, and redownload file if it exists and
    #is not the same size as the header declares. That means more requests,
    # but this is an archive program, and this approach ensures we have
    # archived everything as it was intended.
    for piclink in pic_url_list:
        filename = piclink.rsplit('/', 1)[1]
        destination = '{}/{}'.format(full_folder_path, filename)
        downloader(piclink, destination)
    return None


def create_info_text_file(item_type_value, first_page_html, url_title, full_folder_path):
    """
    Write "<url_title>.txt" into full_folder_path containing the page
    url, the album/track description and all genre tags.

    NOTE(review): the body references a name `album_url` that is not
    defined in this function or anywhere visible in this file — unless
    an unseen part of the file sets it as a module global, this raises
    NameError; confirm before relying on this function.
    NOTE(review): if item_type_value is neither 'album' nor 'track',
    desc_html is never assigned and the function raises NameError.
    """
    if item_type_value == 'album':
        # album pages keep the description in a tralbum-about div
        desc_find_clue = '<div class="tralbumData tralbum-about" itemprop="description">'
        if desc_find_clue in first_page_html:
            split_desc_html_list = first_page_html.split(desc_find_clue)
            split_desc_html_first = split_desc_html_list[1]
            desc_html = split_desc_html_first.split('</div>')[0]
        else:
            desc_html = 'description not found'
    elif item_type_value == 'track':
        # track pages keep it in a Description meta tag instead
        desc_find_clue = '<meta name="Description" content="'
        if desc_find_clue in first_page_html:
            split_desc_html_list = first_page_html.split(desc_find_clue)
            split_desc_html_first = split_desc_html_list[1]
            desc_html = split_desc_html_first.split('">')[0]
        else:
            desc_html = 'description not found'
    #TODO: create "de-html-ify" function to transalte special characters to plain text
    desc_fixed = desc_html.replace('&quot;', '"')
    desc_wrapped = textwrap.fill(desc_fixed, 72)
    # now get tags
    tag_first_clue = '<a class="tag" href='
    split_tag_html_list = first_page_html.split(tag_first_clue)
    num_of_tags = len(split_tag_html_list) - 1
    tag_dict = {}
    for n in list(range(1, num_of_tags + 1)):
        split_tag_html_first = split_tag_html_list[n]
        split_tag_html_url = split_tag_html_first.split('"')[1]
        split_tag_html_name_first = split_tag_html_first.split('>')[1]
        split_tag_html_name = split_tag_html_name_first.split('<')[0]
        # now we add the url and name pair to the tag_dict dictionary
        tag_dict.update({split_tag_html_url : split_tag_html_name})
    # now lets create a text string of all this info and save it to a file
    info_text_url = 'Url:\n{}\n\n\n'.format(album_url)
    info_text_desc = 'Description:\n{}\n\n\n'.format(desc_wrapped)
    info_text_tags = 'Tags:\n'
    for key, value in tag_dict.items():
        info_text_tags = info_text_tags + '{}\n{}\n\n'.format(value, key)
    # now save it to a file
    info_text_full = info_text_url + info_text_desc + info_text_tags
    info_text_filename = '{}.txt'.format(url_title)
    info_text_save_location = '{}/{}'.format(full_folder_path, info_text_filename)
    with open(info_text_save_location, 'wt') as outfile:
        outfile.write(info_text_full)
        outfile.close()
    return

def download_streaming_quality(first_page_html, full_folder_path):
    """
    Parse the inline 'trackinfo' javascript blob from the page and
    download each track's streaming-quality mp3 into full_folder_path.

    File names are "<zero-padded-number> - <title> - <id>.mp3".

    NOTE(review): reads the module-level names cfg_dict (for 'verbose'
    and 'protocol') — confirm they are set before this is called.
    NOTE(review): when a track's file entry is "null", downloader() is
    still called with track_url == "null", which will fail; presumably
    a `continue` was intended — verify.
    """
    if 'trackinfo : [],' in first_page_html or 'trackinfo: [],' in first_page_html:
        print("there are no streamable tracks on this page")
        return()
    # the marker's spacing varies between pages
    first_clue_start = 'trackinfo : [{'
    if not first_clue_start in first_page_html:
        first_clue_start = 'trackinfo: [{'
    first_clue_end = '}]'
    tracks_blob = first_page_html.split(first_clue_start)[1].split(first_clue_end)[0]
    # the first key of the blob (e.g. 'track_num') is reused as the
    # delimiter between per-track records
    second_clue = tracks_blob.split(':')[0]
    # need to fix position 0 in list split_tracks_blob because it is
    # useless and we have to compensate when page is song type.
    item_type_value = determine_player_type(first_page_html)
    if item_type_value == 'track':
        split_tracks_blob = ['compatibilityplaceholder', tracks_blob]
        number_of_tracks = 1
    elif item_type_value == "album":
        split_tracks_blob = tracks_blob.split(second_clue)
        number_of_tracks = len(split_tracks_blob)-1
    track_ids = []
    track_titles = []
    track_urls = []
    id_clue = '"id":'
    title_clue = '"title":'
    url_clue = '"file":'
    third_clue_end = ',"'
    track_dest_filenames = []
    tracks_nostream = []
    listed_range_of_track_numbers = list(range(1, number_of_tracks + 1))
    highest_track_number = listed_range_of_track_numbers[-1]
    # pad track numbers to the width of the highest number
    highest_track_number_length = len(str(highest_track_number))
    print("found {} tracks".format(highest_track_number))
    for n in listed_range_of_track_numbers:
        track_id = split_tracks_blob[n].split(id_clue)[1].split(third_clue_end)[0]
        track_ids.append(track_id)
        # take the second quoted field so quotes inside titles survive
        track_title = split_tracks_blob[n].split(title_clue)[1].split('"')[1]
        track_titles.append(track_title)
        track_number = str(n)
        while len(track_number) < highest_track_number_length:
            track_number = "0" + track_number
        print("track {} out of {}".format(track_number, highest_track_number))
        track_filename = track_number + ' - ' + track_title + ' - ' + track_id + '.mp3'
        track_filename = replace_illegal_characters(track_filename)
        track_filename_full = full_folder_path + '/' + track_filename
        if cfg_dict['verbose']:
            print(track_filename)
        track_dest_filenames.append(track_filename)
        track_url = split_tracks_blob[n].split(url_clue)[1].split(third_clue_end)[0]
        if track_url == "null":
            print("    {} : no link found on page".format(track_filename))
            tracks_nostream.append(track_filename)
        else:
            # the url is protocol-relative; prefix the chosen protocol
            track_url = track_url.split('"')[3].split(':')[1]
            track_url = cfg_dict['protocol'] + ":" + track_url
        if cfg_dict['verbose']:
            print(track_url)

        downloader(track_url, track_filename_full)

    # Design notes (condensed from an older inline draft of downloader):
    # since this is an archive program we must query every file to ensure
    # our local copies match the remote sizes, so resumable, chunked
    # downloads with size verification live in downloader() now.
    return

def downloader(url, savename):
    """
    Download url and save it as savename.

    If the server provides a Content-Length header and a file of exactly
    that size already exists at savename, the download is skipped.
    Progress is printed per chunk; the data is written to disk only once
    the whole file has been received, then the size is verified against
    the header (exiting with status 99 on a mismatch).

    NOTE(review): reads the module-level cfg_dict for the 'verbose'
    flag — confirm it is always defined before this is called.
    """
    chunk_size = 8192
    # re-encode the name for whatever the console can display
    display_name = savename.rsplit('/', 1)[1].encode('utf8').decode(sys.stdout.encoding)
    binary_data = urlopen(url)

    try:
        header_content_length = binary_data.getheader('Content-Length')
    except AttributeError:
        # BUG FIX: narrowed from a bare except. python2's response object
        # has no getheader(); fall back to its mimetools message there,
        # otherwise give up on knowing the size
        if sys.version_info < (3,):
            header_content_length = binary_data.info().getheader('Content-Length')
        else:
            header_content_length = None

    if header_content_length:
        total_size = int(header_content_length)
        # ceil-divide to count partial final chunk
        total_num_of_chunks = int((total_size / chunk_size) + (total_size % chunk_size > 0))
        if os.path.isfile(savename):
            if os.stat(savename).st_size == total_size:
                print('    {} : already fully retrieved'.format(display_name))
                return
    current_chunk = 0
    bytes_so_far = 0
    binary_data_dump = bytearray()
    while 1:
        chunk = binary_data.read(chunk_size)
        if chunk:
            current_chunk += 1
            binary_data_dump += chunk
            bytes_so_far += len(chunk)
            if header_content_length:
                percent = round((bytes_so_far / total_size) * 100, 1)
                if cfg_dict['verbose']:
                    print('    bytes: {} / {}        chunks: {} / {}        {}%'.format(bytes_so_far, total_size, current_chunk, total_num_of_chunks, percent), end='\r')
                else:
                    print('    {}    {}%'.format(display_name, percent), end='\r')
            else:
                # size unknown: just report the running byte count
                print('    {}    bytes-retrieved: {}'.format(display_name, bytes_so_far), end='\r')
        else:
            # empty read means EOF: write everything out in one go
            # (the with-statement closes the file; the old explicit
            # close() was redundant)
            with open(savename, 'wb') as outfile:
                outfile.write(binary_data_dump)
            break
    print('')
    outfilesize = os.stat(savename).st_size
    if header_content_length:
        if not outfilesize == total_size:
            if cfg_dict['verbose']:
                # BUG FIX: these used to concatenate str + int, which
                # raised TypeError exactly when the diagnostics were needed
                print("outfilesize = {}".format(outfilesize))
                print("header size = {}".format(total_size))
            print('ERROR: file size does not match the content-length size')
            sys.exit(99)
        else:
            if cfg_dict['verbose']:
                print('download completed successfully!')
    return

def give_email_for_nyp(first_page_html, cfg_dict):
    """Build the 'name your price' email-download request for an item.

    Parses the tralbum item id/type out of the album page html, then
    constructs the POST payload for bandcamp's /email_download
    endpoint.  The actual network request is still disabled
    (commented out) pending testing.

    first_page_html -- unicode html of the album page
    cfg_dict        -- config dict; reads nyp_email, nyp_zipcode,
                       input_url_protocol and full_domain

    Exits with status 99 if the item id cannot be located or the nyp
    email/zipcode are still at their placeholder defaults.
    """
    # urlencode lives in a different module in python2 vs python3;
    # import locally so both interpreters work.  (was:
    # urllib.parse.urlencode -- NameError, since only urlparse and
    # urlopen are imported at module level)
    if sys.version_info < (3,):
        from urllib import urlencode
    else:
        from urllib.parse import urlencode
    email = cfg_dict['nyp_email']
    zipcode = cfg_dict['nyp_zipcode']
    country = 'United+States'
    # the html sometimes has a space before the colon; try both forms
    item_clue = 'tralbum_param:'
    item_clue_end = '},'
    if item_clue not in first_page_html:
        item_clue = 'tralbum_param :'
    if item_clue in first_page_html:
        id_and_type = first_page_html.split(item_clue)[1].split(item_clue_end)[0]
        item_clue = 'name:'
        if item_clue not in id_and_type:
            item_clue = 'name :'
        type_clue = 'value:'
        if type_clue not in id_and_type:
            type_clue = 'value :'
        # NOTE(review): the 'name' field is treated as the item *type*
        # and the 'value' field as the item *id* -- confirm against
        # live bandcamp html.
        item_type = id_and_type.split(item_clue)[1].split(',')[0].replace('"', '').replace(' ', '')
        item_id = id_and_type.split(type_clue)[1].replace(' ', '')
    else:
        print('could not find item id')
        exit(99)
    print('id = {}, type = {}, email={}, country={}, zipcode={}'.format(item_id, item_type, email, country, zipcode))

    if email == 'example@example.org' or zipcode == '00000':
        print("configuration for nyp was not set")
        exit(99)
    url = cfg_dict['input_url_protocol'] + '://' + cfg_dict['full_domain'] + '/' + 'email_download'
    data = urlencode({'encoding_name': 'none',
                      'item_id': item_id,
                      'item_type': item_type,
                      'address': email,
                      'country': country,
                      'postcode': zipcode})
    binary_data = data.encode('UTF-8')
    print(url)
    print(data)
#    response = urllib.request.urlopen(url, binary_data)
#    print(response)
#    exit(0)
    return

def get_email(cfg_dict):
    """Prepare the local directory used for retrieving nyp emails.

    Reads the pop3 account settings from cfg_dict and makes sure the
    directory that fetched mail will be saved into exists.  Actual
    pop3 retrieval is not implemented yet.

    cfg_dict -- config dict; reads email_dir, nyp_email,
                nyp_email_password, nyp_email_pop_server and
                nyp_email_pop_server_port
    """
    email_dir = cfg_dict['email_dir']
    address = cfg_dict['nyp_email']
    password = cfg_dict['nyp_email_password']
    server = cfg_dict['nyp_email_pop_server']
    port = cfg_dict['nyp_email_pop_server_port']
    # was missing the {} placeholders for server and port, so those two
    # values were silently dropped from the message
    print('email_dir = {} , address = {} , password = {} , server = {} , port = {}'.format(email_dir, address, password, server, port))
    # check if email_dir exists as a dir, and if not create it
    # (a bare os.makedirs() raises OSError on a second run; exist_ok
    # is python3-only, so test first)
    if not os.path.isdir(email_dir):
        os.makedirs(email_dir)
#    exit(0)

def grab_album(album_url, cfg_dict):
    """Fetch and archive a single album or track page.

    Downloads the page html (when the input was an index page),
    extracts artist/title, reports name-your-price / free-download
    availability, creates the album folder, saves cover art and an
    info text file, then streams the mp3s into the folder.

    album_url -- full url of the album or track page
    cfg_dict  -- config dict; reads verbose and dl_location_poster_base

    NOTE(review): relies on module-level names set up by the main
    block: SEPARATOR, page_type, current_album_number,
    number_of_albums and first_page_html_savefile.
    """
    print(SEPARATOR)
    print("job {} out of {}".format(current_album_number, number_of_albums))
    url_title = determine_url_title(album_url)
    # if the input link was a player page we already have the html we
    # need, but if it was an index we must grab this album's page now.
    # (was `page_type is "index"` -- identity comparison on a string
    # literal, which only works by interning accident; use ==)
    if page_type == "index":
        downloader(album_url, first_page_html_savefile)
    if os.path.isfile(first_page_html_savefile):
        with codecs.open(first_page_html_savefile, 'r', 'utf-8') as outfile:
            first_page_html = outfile.read()
            artist_name = find_artist_name(first_page_html)
            item_type_value = determine_player_type(first_page_html)
            if item_type_value == 'album':
                title_name_fixed = find_album_name(first_page_html)
            elif item_type_value == 'track':
                title_name_fixed = find_track_name(first_page_html)
            # re-encode for whatever encoding the console supports
            disp_artist = artist_name.encode('utf8').decode(sys.stdout.encoding)
            disp_title = title_name_fixed.encode('utf8').decode(sys.stdout.encoding)

            print()
            print("Artist: " + disp_artist)
            print("Title: " + disp_title)

            if detectnyp(first_page_html):
                print(NYP_MESSAGE)
            elif detect_free_download(first_page_html):
                print(FREE_DL_MESSAGE)
            else:
                try:
                    digital_price = determine_digital_price(first_page_html, cfg_dict)
                    print()
                    print(NOTNYP_MESSAGE)
                    print("The digital download price is {}".format(digital_price))
                    print()
                except Exception as e:
                    print("there was an error determining the price. skipping")
                    print("the error was:", e)

            folder_name = '{} - {}'.format(artist_name, title_name_fixed)
            full_folder_path = '{}/{}'.format(cfg_dict['dl_location_poster_base'], folder_name)
            # make folder for album (was a bare except; only directory
            # creation failures are expected here, so catch OSError)
            try:
                os.makedirs(full_folder_path)
            except OSError:
                if cfg_dict['verbose']:
                    print("dir already exists: " + full_folder_path)
            # now move the downloaded html file into this new dir.
            # os.replace() is python3-only, so use rename and tolerate
            # an already-existing target (windows raises, unix doesn't)
            final_html_save_location = "{}/{}.html".format(full_folder_path, url_title)
            try:
                os.rename(first_page_html_savefile, final_html_save_location)
            except OSError:
                if cfg_dict['verbose']:
                    print("file already exists: " + final_html_save_location)
            # gen list of links for images we want
            pic_url_list = gen_pic_list(first_page_html)
            download_pics(pic_url_list, full_folder_path)
            print()
            # save the album description alongside the audio
            create_info_text_file(item_type_value, first_page_html, url_title, full_folder_path)
            # finally stream the mp3s into the album folder
            download_streaming_quality(first_page_html, full_folder_path)
    return

def genlinks1(first_page_html, cfg_dict):
    """Parse an uploader index page into a list of album/track urls.

    Splits the html on the '<li data-item-id=' markup; when no entries
    are found, falls back to genlinks2()'s alternate markup.  Only
    links whose path starts with /album or /track are kept (links to
    other artists' pages are out of scope).

    first_page_html -- unicode html of the uploader's index page
    cfg_dict        -- config dict; reads protocol and full_domain

    Returns a list of absolute urls.
    """
    print('attempting parsing method 1')
    album_link_clue = '<li data-item-id='
    split_album_html_list = first_page_html.split(album_link_clue)
    num_of_albums = len(split_album_html_list) - 1
    print("found {} albums to download".format(num_of_albums))
    album_link_list = []
    if num_of_albums < 1:
        # was: genlinks2(first_page_html, cfg_dict['input_url']) --
        # genlinks2 expects the whole cfg_dict, not just the url string
        album_link_list = genlinks2(first_page_html, cfg_dict)
    else:
        for n in range(1, num_of_albums + 1):
            chunk = split_album_html_list[n]
            # the relative url is the 5th quoted attribute value
            split_album_html_url = chunk.split('"')[9]
            type_prefix = split_album_html_url.split('/')[1]
            # was: `not type_prefix == 'album' or type_prefix == 'track'`
            # which, by operator precedence, skipped every /track link
            if type_prefix in ('album', 'track'):
                full_album_url = cfg_dict['protocol'] + '://' + cfg_dict['full_domain'] + split_album_html_url
                album_link_list.append(full_album_url)
    return album_link_list


def genlinks2(first_page_html, cfg_dict):
    """Fallback index parser using the 'indexpage_list_cell' markup.

    Used when genlinks1() finds no '<li data-item-id=' entries.
    Exits with status 99 when this markup is also absent.  Only links
    whose path starts with /album or /track are kept; functionality
    for links to other artists' bandcamp pages could be added later.

    first_page_html -- unicode html of the uploader's index page
    cfg_dict        -- config dict; reads protocol and full_domain

    Returns a list of absolute urls.
    """
    print('attempting parsing method 2, since 0 albums were found with parsing method 1')
    album_link_clue = 'class="indexpage_list_cell'
    split_album_html_list = first_page_html.split(album_link_clue)
    num_of_albums = len(split_album_html_list) - 1
    print("found {} albums to download".format(num_of_albums))
    album_link_list = []
    if num_of_albums < 1:
        print('still couldnt find any albums :-( ')
        exit(99)
    else:
        for n in range(1, num_of_albums + 1):
            chunk = split_album_html_list[n]
            # the relative url sits at quoted-split index 22 in this
            # markup variant
            split_album_html_url = chunk.split('"')[22]
            type_prefix = split_album_html_url.split('/')[1]
            # was: `not type_prefix == 'album' or type_prefix == 'track'`
            # which, by operator precedence, skipped every /track link
            if type_prefix in ('album', 'track'):
                # was appending the undefined name full_album_url
                # (NameError); restore the url construction from
                # cfg_dict that the FIXME had commented out
                full_album_url = cfg_dict['protocol'] + '://' + cfg_dict['full_domain'] + split_album_html_url
                album_link_list.append(full_album_url)
    return album_link_list


# ---- MAIN INIT LOGIC ---- #


if __name__ == '__main__':
    # build configuration: defaults, then config file, then CLI args
    cfg_dict = {}
    cfg_dict = initialize_cfg_dict()
    if os.path.isfile(cfg_dict['cfg_file']):
        cfg_dict.update(read_config_file(cfg_dict))
    cfg_dict.update(setup_argument_parser(cfg_dict))
    if cfg_dict['verbose']:
        print('cfg_dict = {}'.format(cfg_dict))
        print("argv = {}".format(sys.argv))
    if cfg_dict['save_config']:
        write_config_file(cfg_dict)
    if not cfg_dict['input_url']:
        print('url must be provided')
        exit(1)
    make_dl_dir(cfg_dict)
    cfg_dict['dl_dir_full'] = os.path.abspath(cfg_dict['dl_dir'])
    print('DL_DIR = ' + cfg_dict['dl_dir_full'])
    cfg_dict.update(parse_input_url(cfg_dict))
    make_base_dir(cfg_dict)
    save_first_page(cfg_dict)
    first_page_html_savefile = cfg_dict['first_page_html_savefile']
    if cfg_dict['gui_mode']:
        tk_gui()
    if os.path.isfile(cfg_dict['first_page_html_savefile']):
        with codecs.open(cfg_dict['first_page_html_savefile'], 'r', 'utf-8') as outfile:
            first_page_html = outfile.read()
            page_type = determine_page_type(first_page_html, cfg_dict)
            print("page_type = " + repr(page_type))
            if page_type == "album" or page_type == "song":
                # a single album/track: one job
                print('url {} is for an album/track'.format(cfg_dict['input_url']))
                album_url = cfg_dict['input_url']
                current_album_number = 1
                number_of_albums = 1
                grab_album(album_url, cfg_dict)
            elif page_type == "index":
                # was `page_type is "index"` -- identity comparison on a
                # string literal, which only works by interning accident
                print('url {} is for an uploaders index'.format(cfg_dict['input_url']))
                album_urls_list = genlinks1(first_page_html, cfg_dict)
                number_of_albums = len(album_urls_list)
                current_album_number = 1
                index_html_file_location = "{}/{}-index.html".format(cfg_dict['dl_location_poster_base'], cfg_dict['sub_domain'])
                # if file exists under windows, an error will be returned,
                # but not under unix... python2 doesnt have os.replace()
                # https://docs.python.org/3/library/os.html
                try:
                    os.rename(cfg_dict['first_page_html_savefile'], index_html_file_location)
                    print(cfg_dict['first_page_html_savefile'] + ' has been renamed to ' + index_html_file_location)
                except OSError:
                    # was a bare except; only rename failures are expected
                    if cfg_dict['verbose']:
                        print("file already exists: " + index_html_file_location)
                for album_url in album_urls_list:
                    grab_album(album_url, cfg_dict)
                    current_album_number = current_album_number + 1