# Bach Chorale example
# Autoencoder decoding
# Jean-Pierre Briot
# 05/04/2019

import random

import numpy as np

from keras.models import Sequential, Model
from keras.layers import Input, Dense, Activation
from keras import regularizers
from keras.utils import np_utils

from representation import *
from metrics import *

config.deep_music_analysis_verbose = False	# analysis verbose flag - default value = False
config.deep_music_training_verbose = True	# training verbose flag - default value = False
config.deep_music_generate_verbose = True	# generation verbose flag - default value = False

print('Loading and parsing the corpus (Bach chorales)')

corpus_names_list = []

for i in range(80):
	corpus_names_list.append('bach/bwv' + str(344 + i))

corpus_list = load_corpus(corpus_names_list)
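# corpus_list now holds the 80 parsed chorales (bach/bwv344 .. bach/bwv423)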

print('Analyze the corpus (Bach chorales)')

analyze_corpus(corpus_list)

print('Durations (in quarter length): ' + str(music_duration_quarter_length_v))

max_number_quarters = min(music_duration_quarter_length_v)
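# the shortest chorale duration is used as the common (maximum) length, so every encoded chorale fits into a fixed-size input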

config.max_number_time_steps = int(max_number_quarters / config.time_step_quarter_length)

print('Max number of quarters (in quarter length): ' + str(max_number_quarters))
print('Max number of time steps: ' + str(config.max_number_time_steps))

print('Encode the corpus (Bach chorales)')

corpus_data = encode_corpus(corpus_list)
# structure hierarchy: music / part / encoded_data

print('Construction of the training and validation (test) datasets')

X_train = []
y_train = []

for i in range(len(corpus_data)):
	X_train.append(corpus_data[i][0])

# X_train: one flattened vector per chorale (part 0 of each chorale), time steps concatenated:
#	[[0, 0, 0 .. 0, (:) ... (:) 0, 0, 0 .. 0],		<- chorale 1: time step 1 .. time step N
#	 ...
#	 [0, 0, 0 .. 0, (:) ... (:) 0, 0, 0 .. 0]]		<- chorale P: time step 1 .. time step N

X_train, y_train, X_test, y_test = split_training_test(X_train, X_train, 4)
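# the input is also the target (autoencoder), hence X_train is passed twice; the last argument
# presumably sets the train/test ratio (4:1 here: 64 training / 16 test chorales, cf. the shapes below)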

# numpy representations

X_train = np.array(X_train)
y_train = X_train
X_test = np.array(X_test)
y_test = X_test

print('X_train shape: ' + str(X_train.shape))		# (64, 3520)
print('y_train shape: ' + str(y_train.shape))			# (64, 3520)
print('X_test shape: ' + str(X_test.shape))			# (16, 3520)
print('y_test shape: ' + str(y_test.shape))			# (16, 3520)

# Compute the input and output sizes of the model

input_size = config.max_number_time_steps * config.one_hot_size_v[0]

output_size = input_size

print('Input size: ' + str(input_size))
print('Output size: ' + str(output_size))

# Input size: 3520 = 22 * 4 * 4 * 10
#	= one_hot_size_v[0] * (1 / time_step_quarter_length) * (4 quarters per measure * max number of measures)
#	= one_hot_size_v[0] * max_number_time_steps
# Output size = Input size = 3520

# Define the deep learning model/architecture

hidden_layer_size = 50

hidden_layer_input_size = hidden_layer_size	# size of the decoder input (the latent vector), equal to the hidden layer size

print('Hidden layer size: ' + str(hidden_layer_size))

print('Define and create the autoencoder model/network')

# Model of the Autoencoder
model = Sequential()

model.add(Dense(hidden_layer_size,
	input_shape = (input_size, ),
	activation = 'sigmoid',
	activity_regularizer = regularizers.l1(10e-5)))
model.add(Dense(input_size,
	activation = 'sigmoid'))
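# Sparse autoencoder: 3520 -> 50 -> 3520, sigmoid units, with an L1 activity regularizer
# (10e-5 = 1e-4) encouraging sparse activations in the 50-unit bottleneck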

# Organize decoder input

# Encoder_layer = model.layers[0]
Decoder_layer = model.layers[1]

Decoder_Input_layer = Input(shape = (hidden_layer_size, ))

decoder = Model(inputs = Decoder_Input_layer,
	outputs = Decoder_layer(Decoder_Input_layer))
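# the Dense decoder layer is shared with the autoencoder model, so once model.fit() has run,
# decoder maps 50-dimensional latent vectors back to full 3520-dimensional encodings with the trained weights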

model.compile(optimizer = 'adam',
	loss = 'binary_crossentropy',
	metrics = ['accuracy'])

# Training

print('Training the model/network')

history = model.fit(X_train,
	y_train,
	batch_size = 20,
	epochs = 100,
	verbose = keras_verbose(),
	validation_data = (X_test, y_test))
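# 100 epochs with mini-batches of 20 chorales; the test set also serves as validation data to monitor the loss during training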

print('Model trained')

if config.deep_music_training_verbose:
	print('Show metrics')
	show_metrics(model, history, X_train, y_train, X_test, y_test)
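
# Generation helpers: create_labels draws n random latent vectors (one value per hidden unit,
# uniform in [min_value, max_value]); create_melodies_from_labels decodes them and wraps each
# decoded vector as a single-part piece (cf. the music/part/encoded_data hierarchy above)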

def create_melodies(n, min_value, max_value):
	return(create_melodies_from_labels(create_labels(n, min_value, max_value)))

def create_labels(n, min_value, max_value):
	labels_list = []
	for i in range(n):
		label = []
		for j in range(hidden_layer_input_size):
			label.append(random.uniform(min_value, max_value))
		labels_list.append(label)
	return(labels_list)

def create_melodies_from_labels(labels_list):
	data_list = decoder.predict(np.array(labels_list))
	data_list2 = []
	for i in range(len(data_list)):			# transforms	[[0, 0 .. 0 (:) ... (:) 0, 0 .. 0], ... [0, 0 .. 0 (:) ... (:) 0, 0 .. 0]]
		data_list2.append([data_list[i]])		# into		[[[0, 0 .. 0 (:) ... (:) 0, 0 .. 0]], ... [[0, 0 .. 0 (:) ... (:) 0, 0 .. 0]]]
	return(data_list2)

# The output layer has no softmax, so the decoded note vectors are not probability distributions:
# either decode deterministically (no sampling) or sample from an explicitly added softmax

print('Create random melodies')

melodies_list = create_melodies(5, 0, 1)
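# 5 melodies decoded from latent vectors drawn uniformly in [0, 1], the output range of the sigmoid bottleneck units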

scores = create_scores(1, melodies_list)

if not(config.is_deterministic):
	print(str((config.multinomial_error_number / config.multinomial_sampling_number) * 100) + '% of random multinomial errors when sampling')

print('Write the scores')

for i in range(len(scores)):
	scores[i].write('midi', 'mid/auto_melody_' + str(i) + '.mid')

print('Create specific melodies')

all_one_label = [1] * hidden_layer_input_size

all_zero_label = [0] * hidden_layer_input_size

melodies_list2 = create_melodies_from_labels([all_zero_label, all_one_label])

scores2 = create_scores(1, melodies_list2)

print('Write the scores')

scores2[0].write('midi', 'mid/auto_melody_all_zero.mid')
scores2[1].write('midi', 'mid/auto_melody_all_one.mid')
