import codecs
import sys
from io import BytesIO

import PIL
import requests
from PIL import Image
# import pearsonr
from scipy.stats import pearsonr
from datetime import datetime

# Rewrap stdout so prints are always UTF-8 encoded.
# NOTE(review): presumably needed for consoles whose default encoding is not
# UTF-8 (e.g. some Windows setups) — confirm; on Python 3.7+
# sys.stdout.reconfigure(encoding='utf-8') is the usual modern spelling.
sys.stdout = codecs.getwriter('utf-8')(sys.stdout.detach())

# NOTE: this file is an exercises walkthrough. Many bare expression
# statements below are evaluated purely for demonstration; their results
# are discarded unless explicitly printed.

# Q. Import numpy as np and print the version number.

import numpy as np

print(np.__version__)

# Q. Create a 1D array of numbers from 0 to 9
arr = np.arange(10)

# Q. Create a 3×3 numpy array of all Trues
np.full((3, 3), True, dtype=bool)
np.ones((3, 3), dtype=bool)

# Q. Extract all odd numbers from arr
arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

# Boolean mask keeps only the odd elements.
arr[arr % 2 == 1]
# Q. Replace all odd numbers in arr with -1
arr[arr % 2 == 1] = -1

# Q. Replace all odd numbers in arr with -1 without changing arr
arr = np.arange(10)
# np.where builds a new array, leaving arr untouched.
out = np.where(arr % 2 == 1, -1, arr)
print(arr)

# Q. Convert a 1D array to a 2D array with 2 rows
arr = np.arange(10)
arr.reshape(2, -1)  # Setting to -1 automatically decides the number of cols

# Q. Stack arrays a and b vertically
a = np.arange(10).reshape(2, -1)
b = np.repeat(1, 10).reshape(2, -1)

# Three equivalent ways to stack row-wise:
np.concatenate([a, b], axis=0)
np.vstack([a, b])
np.r_[a, b]

# Q. Stack the arrays a and b horizontally.
a = np.arange(10).reshape(2, -1)
b = np.repeat(1, 10).reshape(2, -1)

# Three equivalent ways to stack column-wise:
np.concatenate([a, b], axis=1)
np.hstack([a, b])
np.c_[a, b]

# Q. Create the following pattern without hardcoding. Use only numpy functions and the below input array a.
a = np.array([1, 2, 3])

# repeat -> 1 1 1 2 2 2 3 3 3; tile -> 1 2 3 1 2 3 1 2 3
np.r_[np.repeat(a, 3), np.tile(a, 3)]

# Q. Get the common items between a and b
a = np.array([1, 2, 3, 2, 3, 4, 3, 4, 5, 6])
b = np.array([7, 2, 10, 2, 7, 4, 9, 4, 9, 8])
np.intersect1d(a, b)

# Q. From array a remove all items present in array b
a = np.array([1, 2, 3, 4, 5])
b = np.array([5, 6, 7, 8, 9])

np.setdiff1d(a, b)

# Q. Get the positions where elements of a and b match
a = np.array([1, 2, 3, 2, 3, 4, 3, 4, 5, 6])
b = np.array([7, 2, 10, 2, 7, 4, 9, 4, 9, 8])

np.where(a == b)

# Q. Get all items between 5 and 10 from a.
a = np.arange(15)

# Method 1
index = np.where((a >= 5) & (a <= 10))
a[index]

# Method 2:
index = np.where(np.logical_and(a >= 5, a <= 10))
a[index]
# > (array([6, 9, 10]),)

# Method 3: boolean mask directly (thanks loganzk!)
a[(a >= 5) & (a <= 10)]


# Q. Convert the function maxx that works on two scalars, to work on two arrays.
def maxx(x, y):
    """Return the larger of the two scalar values x and y."""
    return x if x >= y else y


# Q. (cont.) np.vectorize lifts the scalar-only maxx to elementwise use.
# NOTE: per the numpy docs, vectorize is a convenience (essentially a
# Python-level loop), not a true C-speed vectorized operation.
pair_max = np.vectorize(maxx, otypes=[float])

a = np.array([5, 7, 9, 8, 6, 4, 5])
b = np.array([6, 3, 4, 8, 9, 7, 1])

pair_max(a, b)

# Q. Swap columns 1 and 2 in the array arr.
arr = np.arange(9).reshape(3, 3)
# Solution: fancy-index the columns in the new order.
arr[:, [1, 0, 2]]

# Q. Swap rows 1 and 2 in the array arr:
arr = np.arange(9).reshape(3, 3)

arr[[1, 0, 2], :]

# Q. Reverse the rows of a 2D array arr.
arr = np.arange(9).reshape(3, 3)
arr[::-1]

# Q. Reverse the columns of a 2D array arr
arr = np.arange(9).reshape(3, 3)
arr[:, ::-1]

# Q. Create a 2D array of shape 5x3 to contain random decimal numbers between 5 and 10
arr = np.arange(9).reshape(3, 3)
# Solution Method 1: integer part in [5, 9] plus fractional part in [0, 1)
rand_arr = np.random.randint(low=5, high=10, size=(5, 3)) + np.random.random((5, 3))
# print(rand_arr)
# Solution Method 2: draw directly from the uniform distribution
rand_arr = np.random.uniform(5, 10, size=(5, 3))
print(rand_arr)

# Q. Print or show only 3 decimal places of the numpy array rand_arr
rand_arr = np.random.random((5, 3))
# Create the random array
rand_arr = np.random.random([5, 3])
# Limit to 3 decimal places (affects printing only, not stored values)
np.set_printoptions(precision=3)
rand_arr[:4]

# Q. Pretty print rand_arr by suppressing the scientific notation (like 1e10)
# Reset printoptions to default
np.set_printoptions(suppress=False)
# Create the random array
np.random.seed(100)
rand_arr = np.random.random([3, 3]) / 1e3
rand_arr

np.set_printoptions(suppress=True, precision=6)  # precision is optional
rand_arr

# Q. Limit the number of items printed in python numpy array a to a maximum of 6 elements.
np.set_printoptions(threshold=6)
a = np.arange(15)
a

# Q. Print the full numpy array a without truncating.
np.set_printoptions(threshold=6)
a = np.arange(15)

# np.set_printoptions(threshold=np.nan)  # historical answer; np.nan is no longer a valid threshold

np.set_printoptions(threshold=sys.maxsize)

# Q. Import the iris dataset keeping the text intact.
# NOTE: genfromtxt fetches the URL over the network each time; these
# demos fail offline.
# url = 'https://gitee.com/deep-marl/deepreinforcementlearning/blob/master/Python资料/iris.data'
url = 'https://ncut-ai.github.io/data-test/iris.data'
# dtype='object' keeps every field (including the species text) as bytes.
iris = np.genfromtxt(url, delimiter=',', dtype='object')
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')

# Print the first 3 rows
iris[:3]

# Q. Extract the text column species from the 1D iris imported in previous question.
url = 'https://ncut-ai.github.io/data-test/iris.data'
# dtype=None infers per-column types, giving a 1D structured array.
iris_1d = np.genfromtxt(url, delimiter=',', dtype=None)
print(iris_1d.shape)

# Solution:
species = np.array([row[4] for row in iris_1d])
species[:5]

# Q. Convert the 1D iris to 2D array iris_2d by omitting the species text field.
# Q. Find the mean, median, standard deviation of iris's sepallength (1st column)
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris_1d = np.genfromtxt(url, delimiter=',', dtype=None)
# Input
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
sepallength = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0])

# Solution
mu, med, sd = np.mean(sepallength), np.median(sepallength), np.std(sepallength)
print(mu, med, sd)

# Q. Create a normalized form of iris's sepallength whose values range exactly between 0 and 1 so that the minimum has value 0 and maximum has value 1.
url = 'https://ncut-ai.github.io/data-test/iris.data'
sepallength = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0])

# Solution: min-max scaling
Smax, Smin = sepallength.max(), sepallength.min()
S = (sepallength - Smin) / (Smax - Smin)
# or: ptp() is max - min in one call
S = (sepallength - Smin) / sepallength.ptp()  # Thanks, David Ojeda!
print(S)
# Q. Compute the softmax score of sepallength.
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
sepallength = np.array([float(row[0]) for row in iris])


# Solution
def softmax(x):
    """Return the softmax of the 1D array x.

    Subtracting max(x) first keeps np.exp from overflowing; softmax is
    invariant to such additive shifts.
    https://stackoverflow.com/questions/34968722/how-to-implement-the-softmax-function-in-python
    """
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=0)


print(softmax(sepallength))

# Q. Find the 5th and 95th percentile of iris's sepallength
url = 'https://ncut-ai.github.io/data-test/iris.data'
sepallength = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0])
# Solution
np.percentile(sepallength, q=[5, 95])

# Q. Insert np.nan values at 20 random positions in iris_2d dataset
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='object')

# Method 1: np.where on the object array yields the (row, col) indices of
# all truthy (non-empty) cells.
i, j = np.where(iris_2d)

# i, j contain the row numbers and column numbers of 600 elements of iris_x
np.random.seed(100)
iris_2d[np.random.choice((i), 20), np.random.choice((j), 20)] = np.nan

# Method 2: draw row and column indices directly
np.random.seed(100)
iris_2d[np.random.randint(150, size=20), np.random.randint(4, size=20)] = np.nan

# Print first 10 rows
print(iris_2d[:10])

# Q. Find the number and position of missing values in iris_2d's sepallength (1st column)
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0, 1, 2, 3])
iris_2d[np.random.randint(150, size=20), np.random.randint(4, size=20)] = np.nan

# Solution
print("Number of missing values: \n", np.isnan(iris_2d[:, 0]).sum())
print("Position of missing values: \n", np.where(np.isnan(iris_2d[:, 0])))

# Q. Filter the rows of iris_2d that has petallength (3rd column) > 1.5 and sepallength (1st column) < 5.0
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0, 1, 2, 3])
# Solution: combine column conditions with elementwise &
condition = (iris_2d[:, 2] > 1.5) & (iris_2d[:, 0] < 5.0)
iris_2d[condition]

# Q. Select the rows of iris_2d that does not have any nan value.
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0, 1, 2, 3])
iris_2d[np.random.randint(150, size=20), np.random.randint(4, size=20)] = np.nan
# Solution
# No direct numpy function for this.
# Method 1: per-row check (note: despite its name, this mask is True for
# rows with NO nan)
any_nan_in_row = np.array([~np.any(np.isnan(row)) for row in iris_2d])
iris_2d[any_nan_in_row][:5]
# Method 2: (By Rong)
iris_2d[np.sum(np.isnan(iris_2d), axis=1) == 0][:5]

# Q. Find the correlation between SepalLength(1st column) and PetalLength(3rd column) in iris_2d
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0, 1, 2, 3])

# Solution 1: off-diagonal entry of the 2x2 correlation matrix
np.corrcoef(iris[:, 0], iris[:, 2])[0, 1]

# Solution 2: scipy also returns the p-value
corr, p_value = pearsonr(iris[:, 0], iris[:, 2])
print(corr)

# Q. Find out if iris_2d has any missing values.
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0, 1, 2, 3])

np.isnan(iris_2d).any()

# Q. Replace all occurrences of nan with 0 in numpy array
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='float', usecols=[0, 1, 2, 3])
iris_2d[np.random.randint(150, size=20), np.random.randint(4, size=20)] = np.nan

# Solution
iris_2d[np.isnan(iris_2d)] = 0
iris_2d[:4]

# Q. Find the unique values and the count of unique values in iris's species
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
# Solution
# Extract the species column as an array
species = np.array([row.tolist()[4] for row in iris])
# Get the unique values and the counts
np.unique(species, return_counts=True)

# Q. Bin the petal length (3rd) column of iris_2d to form a text array, such that if petal length is:Less than 3 --> 'small'
# 3-5 --> 'medium'
# '>=5 --> 'large'
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
# Bin petallength: digitize returns the index of the bin each value falls in
petal_length_bin = np.digitize(iris[:, 2].astype('float'), [0, 3, 5, 10])
# Map it to respective category (bin 4, i.e. >= 10, gets nan)
label_map = {1: 'small', 2: 'medium', 3: 'large', 4: np.nan}
petal_length_cat = [label_map[x] for x in petal_length_bin]
# View
petal_length_cat[:4]

# Q. Create a new column for volume in iris_2d, where volume is (pi x petallength x sepal_length^2)/3
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris_2d = np.genfromtxt(url, delimiter=',', dtype='object')
# Solution
# Compute volume
sepallength = iris_2d[:, 0].astype('float')
petallength = iris_2d[:, 2].astype('float')
volume = (np.pi * petallength * (sepallength ** 2)) / 3
# Introduce new dimension to match iris_2d's
volume = volume[:, np.newaxis]
# Add the new column
out = np.hstack([iris_2d, volume])
# View
out[:4]

# Q. Randomly sample iris's species such that setosa is twice the number of versicolor and virginica
# Import iris keeping the text column intact
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')

# Solution
# Get the species column
species = iris[:, 4]

# Approach 1: Generate Probabilistically
np.random.seed(100)
a = np.array(['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'])
species_out = np.random.choice(a, 150, p=[0.5, 0.25, 0.25])

# Approach 2: Probabilistic Sampling (preferred)
# probs assigns half of [0, 1] to setosa and a quarter each to the others;
# searchsorted then maps uniform draws to row indices with those weights.
np.random.seed(100)
probs = np.r_[np.linspace(0, 0.500, num=50), np.linspace(0.501, .750, num=50), np.linspace(.751, 1.0, num=50)]
index = np.searchsorted(probs, np.random.random(150))
species_out = species[index]
print(np.unique(species_out, return_counts=True))

# Q. What is the value of second longest petallength of species setosa
# Import iris keeping the text column intact
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
# Solution
# Get the species and petal length columns (species is bytes, hence b'...')
petal_len_setosa = iris[iris[:, 4] == b'Iris-setosa', [2]].astype('float')
# Get the second last value
np.unique(np.sort(petal_len_setosa))[-2]

# Q. Sort the iris dataset based on sepallength column.
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
print(iris[iris[:, 0].argsort()][:20])

# Q. Find the most frequent value of petal length (3rd column) in iris dataset.
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
# Solution:
vals, counts = np.unique(iris[:, 2], return_counts=True)
print(vals[np.argmax(counts)])

# Q. Find the position of the first occurrence of a value greater than 1.0 in petalwidth 4th column of iris dataset.
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
# Solution: (edit: changed argmax to argwhere. Thanks Rong!)
np.argwhere(iris[:, 3].astype(float) > 1.0)[0]

# Q. From the array a, replace all values greater than 30 to 30 and less than 10 to 10.
np.set_printoptions(precision=2)
np.random.seed(100)
a = np.random.uniform(1, 50, 20)
# Solution 1: Using np.clip
np.clip(a, a_min=10, a_max=30)
# Solution 2: Using np.where (nested: clamp low, then high)
print(np.where(a < 10, 10, np.where(a > 30, 30, a)))

# Q. Get the positions of top 5 maximum values in a given array a.
np.random.seed(100)
a = np.random.uniform(1, 50, 20)
# Solution:
print(a.argsort())
# Solution 2: argpartition avoids a full sort
np.argpartition(-a, 5)[:5]
# Below methods will get you the values.
# Method 1:
a[a.argsort()][-5:]
# Method 2:
np.sort(a)[-5:]
# Method 3:
np.partition(a, kth=-5)[-5:]
# Method 4:
a[np.argpartition(-a, 5)][:5]

# Q. Compute the counts of unique values row-wise.
np.random.seed(100)
arr = np.random.randint(1, 11, size=(6, 10))
# Solution
# Solution
def counts_of_all_values_rowwise(arr2d):
    """Count, per row, every value that appears anywhere in arr2d.

    Returns a list with one inner list per row of ``arr2d``; each inner
    list holds the count (int, 0 if absent) of every globally-unique
    value, in ``np.unique`` (sorted) order. Works for numeric and string
    arrays alike.
    """
    # Compute the global unique values once, not once per row as before.
    all_values = np.unique(arr2d).tolist()
    result = []
    for row in arr2d:
        # Unique values and their counts for this row.
        vals, counts = np.unique(row, return_counts=True)
        # Plain-dict lookup avoids the deprecated int(size-1 ndarray)
        # conversion of the original implementation.
        row_counts = dict(zip(vals.tolist(), counts.tolist()))
        result.append([row_counts.get(v, 0) for v in all_values])
    return result


print(np.arange(1, 11))
counts_of_all_values_rowwise(arr)

# Example 2: the row-wise counting also works on character arrays
arr = np.array([np.array(list('bill clinton')), np.array(list('narendramodi')), np.array(list('jjayalalitha'))])
print(np.unique(arr))
counts_of_all_values_rowwise(arr)

# Q. Convert array_of_arrays into a flat linear 1d array.
arr1 = np.arange(3)
arr2 = np.arange(3, 7)
arr3 = np.arange(7, 10)

# NOTE(review): hstack already flattens here, so array_of_arrays below is
# 1D and the commented "solutions" are redundant — presumably why they
# were disabled.
paaa = list(arr1)+list(arr2)+list(arr3)
paaa1 = np.hstack((arr1,arr2,arr3))

array_of_arrays = np.array(paaa1)
print('array_of_arrays: ', array_of_arrays)
# Solution 1
#arr_2d = np.array([a for arr in array_of_arrays for a in arr])
# Solution 2:
#arr_2d = np.concatenate(array_of_arrays)
#print(arr_2d)

# Q. Compute the one-hot encodings (dummy binary variables for each unique value in the array)
np.random.seed(101)
arr = np.random.randint(1, 4, size=6)


# Solution:
# Solution:
def one_hot_encodings(arr):
    """One-hot encode the 1D array ``arr``.

    Row i of the result is the encoding of ``arr[i]``; columns follow the
    sorted unique values of ``arr``.

    Bug fix: the original indexed columns with ``value - 1``, which is
    wrong (or raises IndexError) unless the values are exactly 1..k.
    Mapping each value to its position among the sorted uniques gives
    identical output for the 1..k case and correct output otherwise.
    """
    uniqs = np.unique(arr)
    out = np.zeros((arr.shape[0], uniqs.shape[0]))
    for i, k in enumerate(arr):
        # Column of value k within the sorted unique values.
        out[i, np.searchsorted(uniqs, k)] = 1
    return out


one_hot_encodings(arr)
# Method 2: broadcast-compare each element against the unique values
(arr[:, None] == np.unique(arr)).view(np.int8)

# Q. Create row numbers grouped by a categorical variable. Use the following sample from iris species as input.
url = 'https://ncut-ai.github.io/data-test/iris.data'
species = np.genfromtxt(url, delimiter=',', dtype='str', usecols=4)
np.random.seed(100)
species_small = np.sort(np.random.choice(species, size=20))
print(species_small)
# enumerate restarts at 0 for each group, yielding within-group row numbers
print([i for val in np.unique(species_small) for i, grp in enumerate(species_small[species_small == val])])

# Q. Create group ids based on a given categorical variable. Use the following sample from iris species as input.
url = 'https://ncut-ai.github.io/data-test/iris.data'
species = np.genfromtxt(url, delimiter=',', dtype='str', usecols=4)
np.random.seed(100)
species_small = np.sort(np.random.choice(species, size=20))
# Solution:
output = [np.argwhere(np.unique(species_small) == s).tolist()[0][0] for val in np.unique(species_small) for s in
          species_small[species_small == val]]
# Solution: For Loop version
output = []
uniqs = np.unique(species_small)

for val in uniqs:  # uniq values in group
    for s in species_small[species_small == val]:  # each element in group
        groupid = np.argwhere(uniqs == s).tolist()[0][0]  # groupid
        output.append(groupid)

print(output)

# Q. Create the ranks for the given numeric array a.
np.random.seed(10)
a = np.random.randint(20, size=10)
print('Array: ', a)
# Solution: argsort of argsort gives each element's rank
print(a.argsort().argsort())
print('Array: ', a)

# Q. Create a rank array of the same shape as a given numeric array a.
np.random.seed(10)
a = np.random.randint(20, size=[2, 5])
print(a)
# Solution: rank the flattened array, then restore the original shape
print(a.ravel().argsort().argsort().reshape(a.shape))

# Q. Compute the maximum for each row in the given array.
np.random.seed(100)
a = np.random.randint(1, 10, [5, 3])
# Solution 1
np.amax(a, axis=1)
# Solution 2
np.apply_along_axis(np.max, arr=a, axis=1)

# Q. Compute the min-by-max for each row for given 2d numpy array.
np.random.seed(100)
a = np.random.randint(1, 10, [5, 3])
# Solution
np.apply_along_axis(lambda x: np.min(x) / np.max(x), arr=a, axis=1)

# Q. Find the duplicate entries (2nd occurrence onwards) in the given numpy array and mark them as True. First time occurrences should be False.
np.random.seed(100)
a = np.random.randint(0, 5, 10)
## Solution
# There is no direct function to do this as of 1.13.3
# Create an all True array
out = np.full(a.shape[0], True)
# Find the index positions of unique elements (return_index gives the
# FIRST occurrence of each unique value)
unique_positions = np.unique(a, return_index=True)[1]
# Mark those positions as False
out[unique_positions] = False
print(out)

# Q. Find the mean of a numeric column grouped by a categorical column in a 2D numpy array
url = 'https://ncut-ai.github.io/data-test/iris.data'
iris = np.genfromtxt(url, delimiter=',', dtype='object')
names = ('sepallength', 'sepalwidth', 'petallength', 'petalwidth', 'species')
# Solution
# No direct way to implement this. Just a version of a workaround.
numeric_column = iris[:, 1].astype('float')  # sepalwidth
grouping_column = iris[:, 4]  # species
# List comprehension version
[[group_val, numeric_column[grouping_column == group_val].mean()] for group_val in np.unique(grouping_column)]
# For Loop version
output = []
for group_val in np.unique(grouping_column):
    output.append([group_val, numeric_column[grouping_column == group_val].mean()])

# Q. Import the image from the following URL and convert it to a numpy array.
# Import image from URL (network-dependent; fails offline)
URL = 'https://ncut-ai.github.io/data-test/Denali_Mt_McKinley.jpg'
response = requests.get(URL, timeout=5)

# Read it as Image
I = Image.open(BytesIO(response.content))

# Optionally resize
I = I.resize([150, 150])

# Convert to numpy array
arr = np.asarray(I)

# Optionally Convert it back to an image and show
im = PIL.Image.fromarray(np.uint8(arr))
# NOTE(review): unbound-method call; the idiomatic spelling is im.show()
Image.Image.show(im)


# Q. Drop all nan values from a 1D numpy array
a = np.array([1, 2, 3, np.nan, 5, 6, 7, np.nan])
a[~np.isnan(a)]


# Q. Compute the euclidean distance between two arrays a and b.
a = np.array([1, 2, 3, 4, 5])
b = np.array([4, 5, 6, 7, 8])

# Solution: L2 norm of the difference vector
dist = np.linalg.norm(a - b)
print(dist)


# Q. Find all the peaks in a 1D numpy array a. Peaks are points surrounded by smaller values on both sides.
a = np.array([1, 3, 7, 1, 2, 6, 0, 1])
# sign of the slope changes from +1 to -1 at a peak, so the double diff is -2
doublediff = np.diff(np.sign(np.diff(a)))
peak_locations = np.where(doublediff == -2)[0] + 1
peak_locations


# Q. Subtract the 1d array b_1d from the 2d array a_2d, such that each item of b_1d subtracts from respective row of a_2d.
a_2d = np.array([[3, 3, 3], [4, 4, 4], [5, 5, 5]])
b_1d = np.array([1, 2, 3])

# Solution: add a trailing axis so b_1d broadcasts across rows
print(a_2d - b_1d[:, None])


# Q. Find the index of 5th repetition of number 1 in x.
x = np.array([1, 2, 1, 1, 3, 4, 3, 1, 1, 2, 1, 1, 2])
n = 5
# Solution 1: List comprehension
[i for i, v in enumerate(x) if v == 1][n - 1]
# Solution 2: Numpy version
np.where(x == 1)[0][n - 1]


# Q. Convert numpy's datetime64 object to datetime's datetime object
dt64 = np.datetime64('2018-02-25 22:10:10')
# Solution
dt64.tolist()
# or
dt64.astype(datetime)

# Q. Compute the moving average of window size 3, for the given 1D array.
# Source: https://stackoverflow.com/questions/14313510/how-to-calculate-moving-average-using-numpy
def moving_average(a, n=3):
    """Simple moving average of window size n over 1D array a.

    Computed from cumulative sums: the sum of window ending at i is
    totals[i] - totals[i - n].
    Source: https://stackoverflow.com/questions/14313510/how-to-calculate-moving-average-using-numpy
    """
    totals = np.cumsum(a, dtype=float)
    window_sums = totals[n - 1:].copy()
    window_sums[1:] -= totals[:-n]
    return window_sums / n

np.random.seed(100)
Z = np.random.randint(10, size=10)
print('array: ', Z)
# Method 1
moving_average(Z, n=3).round(2)
# Method 2:  # Thanks AlanLRH!
# np.ones(3)/3 gives equal weights. Use np.ones(4)/4 for window size 4.
np.convolve(Z, np.ones(3) / 3, mode='valid')

# Q. Create a numpy array of length 10, starting from 5 and has a step of 3 between consecutive numbers
length = 10
start = 5
step = 3


def seq(start, length, step):
    """Arithmetic sequence of exactly `length` values beginning at `start`,
    with `step` between consecutive values."""
    stop = start + step * length
    return np.arange(start, stop, step)


seq(start, length, step)


# Q. Given an array of a non-continuous sequence of dates. Make it a continuous sequence of dates, by filling in the missing dates.
dates = np.arange(np.datetime64('2018-02-01'), np.datetime64('2018-02-25'), 2)
print(dates)

# Solution ---------------
# For each consecutive pair, expand the gap into daily dates.
filled_in = np.array([np.arange(date, (date + d)) for date, d in zip(dates, np.diff(dates))]).reshape(-1)

# add the last day (zip stops one short of the final date)
output = np.hstack([filled_in, dates[-1]])
output

# For loop version -------
out = []
for date, d in zip(dates, np.diff(dates)):
    out.append(np.arange(date, (date + d)))

filled_in = np.array(out).reshape(-1)

# add the last day
output = np.hstack([filled_in, dates[-1]])


# Q. From the given 1d array arr, generate a 2d matrix using strides, with a window length of 4 and strides of 2, like [[0,1,2,3], [2,3,4,5], [4,5,6,7]..]
def gen_strides(a, stride_len=5, window_len=5):
    """Build a 2D matrix of sliding windows over the 1D array a.

    Each row is a length-`window_len` slice of `a`; consecutive windows
    start `stride_len` elements apart. Trailing elements that do not fill
    a whole window are dropped.
    """
    n_windows = (a.size - window_len) // stride_len + 1
    starts = stride_len * np.arange(n_windows)
    return np.array([a[s0:s0 + window_len] for s0 in starts])


# Demo: windows of length 4 stepping by 2 over 0..14
print(gen_strides(np.arange(15), stride_len=2, window_len=4))

