desc stringlengths 3 26.7k | decl stringlengths 11 7.89k | bodies stringlengths 8 553k |
|---|---|---|
'Returns a SequenceExample for the given event sequence pair.
Args:
control_events: A list-like sequence of control events.
target_events: A list-like sequence of target events, the same length as
`control_events`.
Returns:
A tf.train.SequenceExample containing inputs and labels.
Raises:
ValueError: If the control and ... | def encode(self, control_events, target_events):
| if (len(control_events) != len(target_events)):
raise ValueError(('must have the same number of control and target events (%d control events but %d target events)' % (len(control_events), len(target_events))))
inputs = []
labels = []
for i in range... |
'Returns an inputs batch for the given control and target event sequences.
Args:
control_event_sequences: A list of list-like control event sequences.
target_event_sequences: A list of list-like target event sequences, the
same length as `control_event_sequences`. Each target event sequence
must be shorter than the cor... | def get_inputs_batch(self, control_event_sequences, target_event_sequences, full_length=False):
| if (len(control_event_sequences) != len(target_event_sequences)):
raise ValueError(('%d control event sequences but %d target event sequences' % len(control_event_sequences, len(target_event_sequences))))
inputs_batch = []
for (control_events, target_events) in zip(control_ev... |
'Extends the event sequences by sampling the softmax probabilities.
Args:
target_event_sequences: A list of target EventSequence objects.
softmax: A list of softmax probability vectors. The list of softmaxes
should be the same length as the list of event sequences.
Returns:
A Python list of chosen class indices, one fo... | def extend_event_sequences(self, target_event_sequences, softmax):
| return self._target_encoder_decoder.extend_event_sequences(target_event_sequences, softmax)
|
'Evaluate the log likelihood of multiple target event sequences.
Args:
target_event_sequences: A list of target EventSequence objects.
softmax: A list of softmax probability vectors. The list of softmaxes
should be the same length as the list of target event sequences. The
softmax vectors are assumed to have been gener... | def evaluate_log_likelihood(self, target_event_sequences, softmax):
| return self._target_encoder_decoder.evaluate_log_likelihood(target_event_sequences, softmax)
|
'Initialize a MultipleEventSequenceEncoder object.
Args:
encoders: A list of component EventSequenceEncoderDecoder objects whose
output will be concatenated.
encode_single_sequence: If True, at encoding time all of the encoders will
be applied to a single event sequence. If False, each event of the
event sequence shoul... | def __init__(self, encoders, encode_single_sequence=False):
| self._encoders = encoders
self._encode_single_sequence = encode_single_sequence
|
'Constructs an EncoderPipeline.
Args:
input_type: The type this pipeline expects as input.
encoder_decoder: An EventSequenceEncoderDecoder.
name: A unique pipeline name.'
| def __init__(self, input_type, encoder_decoder, name=None):
| super(EncoderPipeline, self).__init__(input_type=input_type, output_type=tf.train.SequenceExample, name=name)
self._encoder_decoder = encoder_decoder
|
'Verify sustain controls extend notes until the end of the control.'
| def testApplySustainControlChanges(self):
| sequence = copy.copy(self.note_sequence)
testing_lib.add_control_changes_to_sequence(sequence, 0, [(0.0, 64, 127), (0.75, 64, 0), (2.0, 64, 127), (3.0, 64, 0), (3.75, 64, 127), (4.5, 64, 127), (4.8, 64, 0), (4.9, 64, 127), (6.0, 64, 0)])
testing_lib.add_track_to_sequence(sequence, 1, [(12, 100, 0.01, 10.0),... |
'Verify that sustain control handles repeated notes correctly.
For example, a single pitch played before sustain:
x-- x-- x--
After sustain:
x---x---x--
Notes should be extended until either the end of the sustain control or the
beginning of another note of the same pitch.'
| def testApplySustainControlChangesWithRepeatedNotes(self):
| sequence = copy.copy(self.note_sequence)
testing_lib.add_control_changes_to_sequence(sequence, 0, [(1.0, 64, 127), (4.0, 64, 0)])
expected_sequence = copy.copy(sequence)
testing_lib.add_track_to_sequence(sequence, 0, [(60, 100, 0.25, 1.5), (60, 100, 1.25, 1.5), (72, 100, 2.0, 3.5), (60, 100, 2.0, 3.0), ... |
'Repeated notes before sustain can overlap and should not be modified.
Once a repeat happens within the sustain, any active notes should end
before the next one starts.
This is kind of an edge case because a note overlapping a note of the same
pitch may not make sense, but apply_sustain_control_changes tries not to
mod... | def testApplySustainControlChangesWithRepeatedNotesBeforeSustain(self):
| sequence = copy.copy(self.note_sequence)
testing_lib.add_control_changes_to_sequence(sequence, 0, [(1.0, 64, 127), (4.0, 64, 0)])
expected_sequence = copy.copy(sequence)
testing_lib.add_track_to_sequence(sequence, 0, [(60, 100, 0.25, 1.5), (60, 100, 0.5, 1.5), (60, 100, 1.25, 2.0)])
testing_lib.add_... |
'Test sustain on and off events happening at the same time.
The off event should be processed last, so this should be a no-op.'
| def testApplySustainControlChangesSimultaneousOnOff(self):
| sequence = copy.copy(self.note_sequence)
testing_lib.add_control_changes_to_sequence(sequence, 0, [(1.0, 64, 127), (1.0, 64, 0)])
testing_lib.add_track_to_sequence(sequence, 0, [(60, 100, 0.5, 1.5), (60, 100, 2.0, 3.0)])
sus_sequence = sequences_lib.apply_sustain_control_changes(sequence)
self.asser... |
'Test sustain control extending the duration of the final note.'
| def testApplySustainControlChangesExtendNotesToEnd(self):
| sequence = copy.copy(self.note_sequence)
testing_lib.add_control_changes_to_sequence(sequence, 0, [(1.0, 64, 127), (4.0, 64, 0)])
expected_sequence = copy.copy(sequence)
testing_lib.add_track_to_sequence(sequence, 0, [(60, 100, 0.5, 1.5), (72, 100, 2.0, 3.0)])
testing_lib.add_track_to_sequence(expec... |
'Test applying extraneous sustain control at the end of the sequence.'
| def testApplySustainControlChangesExtraneousSustain(self):
| sequence = copy.copy(self.note_sequence)
testing_lib.add_control_changes_to_sequence(sequence, 0, [(4.0, 64, 127), (5.0, 64, 0)])
expected_sequence = copy.copy(sequence)
testing_lib.add_track_to_sequence(sequence, 0, [(60, 100, 0.5, 1.5), (72, 100, 2.0, 3.0)])
testing_lib.add_track_to_sequence(expec... |
'In the case of identical notes, one should be dropped.
This is an edge case because in most cases, the same pitch should not sound
twice at the same time on one instrument.'
| def testApplySustainControlChangesWithIdenticalNotes(self):
| sequence = copy.copy(self.note_sequence)
testing_lib.add_control_changes_to_sequence(sequence, 0, [(1.0, 64, 127), (4.0, 64, 0)])
expected_sequence = copy.copy(sequence)
testing_lib.add_track_to_sequence(sequence, 0, [(60, 100, 2.0, 2.5), (60, 100, 2.0, 2.5)])
testing_lib.add_track_to_sequence(expec... |
'Construct a Melody.'
| def __init__(self, events=None, **kwargs):
| if ('pad_event' in kwargs):
del kwargs['pad_event']
super(Melody, self).__init__(pad_event=MELODY_NO_EVENT, events=events, **kwargs)
|
'Initializes with a list of event values and sets attributes.
Args:
events: List of Melody events to set melody to.
start_step: The integer starting step offset.
steps_per_bar: The number of steps in a bar.
steps_per_quarter: The number of steps in a quarter note.
Raises:
ValueError: If `events` contains an event that ... | def _from_event_list(self, events, start_step=0, steps_per_bar=DEFAULT_STEPS_PER_BAR, steps_per_quarter=DEFAULT_STEPS_PER_QUARTER):
| for event in events:
if (not (MIN_MELODY_EVENT <= event <= MAX_MELODY_EVENT)):
raise ValueError(('Melody event out of range: %d' % event))
super(Melody, self)._from_event_list(events, start_step=start_step, steps_per_bar=steps_per_bar, steps_per_quarter=steps_per_quarter)
|
'Adds the given note to the `events` list.
`start_step` is set to the given pitch. `end_step` is set to NOTE_OFF.
Everything after `start_step` in `events` is deleted before the note is
added. `events`\'s length will be changed so that the last event has index
`end_step`.
Args:
pitch: Midi pitch. An integer between 0 a... | def _add_note(self, pitch, start_step, end_step):
| if (start_step >= end_step):
raise BadNoteException(('Start step does not precede end step: start=%d, end=%d' % (start_step, end_step)))
self.set_length((end_step + 1))
self._events[start_step] = pitch
self._events[end_step] = MELODY_NOTE_OFF
for i in range((start_ste... |
'Returns indexes of the most recent pitch and NOTE_OFF events.
Returns:
A tuple (start_step, end_step) of the last note\'s on and off event
indices.
Raises:
ValueError: If `events` contains no NOTE_OFF or pitch events.'
| def _get_last_on_off_events(self):
| last_off = len(self)
for i in range((len(self) - 1), (-1), (-1)):
if (self._events[i] == MELODY_NOTE_OFF):
last_off = i
if (self._events[i] >= MIN_MIDI_PITCH):
return (i, last_off)
raise ValueError('No events in the stream')
|
'Gets a histogram of the note occurrences in a melody.
Returns:
A list of 12 ints, one for each note value (C at index 0 through B at
index 11). Each int is the total number of times that note occurred in
the melody.'
| def get_note_histogram(self):
| np_melody = np.array(self._events, dtype=int)
return np.bincount((np_melody[(np_melody >= MIN_MIDI_PITCH)] % NOTES_PER_OCTAVE), minlength=NOTES_PER_OCTAVE)
|
'Gets a histogram of the how many notes fit into each key.
Returns:
A list of 12 ints, one for each Major key (C Major at index 0 through
B Major at index 11). Each int is the total number of notes that could
fit into that key.'
| def get_major_key_histogram(self):
| note_histogram = self.get_note_histogram()
key_histogram = np.zeros(NOTES_PER_OCTAVE)
for (note, count) in enumerate(note_histogram):
key_histogram[NOTE_KEYS[note]] += count
return key_histogram
|
'Finds the major key that this melody most likely belongs to.
If multiple keys match equally, the key with the lowest index is returned,
where the indexes of the keys are C Major = 0 through B Major = 11.
Returns:
An int for the most likely key (C Major = 0 through B Major = 11)'
| def get_major_key(self):
| key_histogram = self.get_major_key_histogram()
return key_histogram.argmax()
|
'Appends the event to the end of the melody and increments the end step.
An implicit NOTE_OFF at the end of the melody will not be respected by this
modification.
Args:
event: The integer Melody event to append to the end.
Raises:
ValueError: If `event` is not in the proper range.'
| def append(self, event):
| if (not (MIN_MELODY_EVENT <= event <= MAX_MELODY_EVENT)):
raise ValueError(('Event out of range: %d' % event))
super(Melody, self).append(event)
|
'Populate self with a melody from the given quantized NoteSequence.
A monophonic melody is extracted from the given `instrument` starting at
`search_start_step`. `instrument` and `search_start_step` can be used to
drive extraction of multiple melodies from the same quantized sequence. The
end step of the extracted melo... | def from_quantized_sequence(self, quantized_sequence, search_start_step=0, instrument=0, gap_bars=1, ignore_polyphonic_notes=False, pad_end=False, filter_drums=True):
| sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
self._reset()
steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(quantized_sequence)
if ((steps_per_bar_float % 1) != 0):
raise events_lib.NonIntegerStepsPerBarException(('There are %f timesteps... |
'Converts the Melody to NoteSequence proto.
The end of the melody is treated as a NOTE_OFF event for any sustained
notes.
Args:
velocity: Midi velocity to give each note. Between 1 and 127 (inclusive).
instrument: Midi instrument to give each note.
program: Midi program to give each note.
sequence_start_time: A time in... | def to_sequence(self, velocity=100, instrument=0, program=0, sequence_start_time=0.0, qpm=120.0):
| seconds_per_step = ((60.0 / qpm) / self.steps_per_quarter)
sequence = music_pb2.NoteSequence()
sequence.tempos.add().qpm = qpm
sequence.ticks_per_quarter = STANDARD_PPQ
sequence_start_time += (self.start_step * seconds_per_step)
current_sequence_note = None
for (step, note) in enumerate(self... |
'Transpose notes in this Melody.
All notes are transposed the specified amount. Additionally, all notes
are octave shifted to lie within the [min_note, max_note) range.
Args:
transpose_amount: The number of half steps to transpose this Melody.
Positive values transpose up. Negative values transpose down.
min_note: Mini... | def transpose(self, transpose_amount, min_note=0, max_note=128):
| for i in range(len(self)):
if (self._events[i] >= MIN_MIDI_PITCH):
self._events[i] += transpose_amount
if (self._events[i] < min_note):
self._events[i] = (min_note + ((self._events[i] - min_note) % NOTES_PER_OCTAVE))
elif (self._events[i] >= max_note):
... |
'Transpose and octave shift the notes in this Melody.
The key center of this melody is computed with a heuristic, and the notes
are transposed to be in the given key. The melody is also octave shifted
to be centered in the given range. Additionally, all notes are octave
shifted to lie within a given range.
Args:
min_no... | def squash(self, min_note, max_note, transpose_to_key=None):
| if (transpose_to_key is None):
transpose_amount = 0
else:
melody_key = self.get_major_key()
key_diff = (transpose_to_key - melody_key)
midi_notes = [note for note in self._events if (MIN_MIDI_PITCH <= note <= MAX_MIDI_PITCH)]
if (not midi_notes):
return 0
... |
'Sets the length of the melody to the specified number of steps.
If the melody is not long enough, ends any sustained notes and adds NO_EVENT
steps for padding. If it is too long, it will be truncated to the requested
length.
Args:
steps: How many steps long the melody should be.
from_left: Whether to add/remove from t... | def set_length(self, steps, from_left=False):
| old_len = len(self)
super(Melody, self).set_length(steps, from_left=from_left)
if ((steps > old_len) and (not from_left)):
for i in reversed(range(old_len)):
if (self._events[i] == MELODY_NOTE_OFF):
break
elif (self._events[i] != MELODY_NO_EVENT):
... |
'Increase the resolution of a Melody.
Increases the resolution of a Melody object by a factor of `k`. This uses
MELODY_NO_EVENT to extend each event in the melody to be `k` steps long.
Args:
k: An integer, the factor by which to increase the resolution of the
melody.'
| def increase_resolution(self, k):
| super(Melody, self).increase_resolution(k, fill_event=MELODY_NO_EVENT)
|
'Constructs a BaseSequenceGenerator.
Args:
model: An instance of BaseModel.
details: A generator_pb2.GeneratorDetails for this generator.
checkpoint: Where to look for the most recent model checkpoint. Either a
directory to be used with tf.train.latest_checkpoint or the path to a
single checkpoint file. Or None if a bu... | def __init__(self, model, details, checkpoint, bundle):
| self._model = model
self._details = details
self._checkpoint = checkpoint
self._bundle = bundle
if ((self._checkpoint is None) and (self._bundle is None)):
raise SequenceGeneratorException('Either checkpoint or bundle must be set')
if ((self._checkpoint is not None) and... |
'Returns a GeneratorDetails description of this generator.'
| @property
def details(self):
| return self._details
|
'Returns the BundleDetails or None if checkpoint was used.'
| @property
def bundle_details(self):
| if (self._bundle is None):
return None
return self._bundle.bundle_details
|
'Implementation for sequence generation based on sequence and options.
The implementation can assume that _initialize has been called before this
method is called.
Args:
input_sequence: An input NoteSequence to base the generation on.
generator_options: A GeneratorOptions proto with options to use for
generation.
Retur... | @abc.abstractmethod
def _generate(self, input_sequence, generator_options):
| pass
|
'Builds the TF graph and loads the checkpoint.
If the graph has already been initialized, this is a no-op.
Raises:
SequenceGeneratorException: If the checkpoint cannot be found.'
| def initialize(self):
| if self._initialized:
return
if (self._checkpoint is not None):
if (not _checkpoint_file_exists(self._checkpoint)):
raise SequenceGeneratorException(('Checkpoint path does not exist: %s' % self._checkpoint))
checkpoint_file = self._checkpoint
if tf.gfil... |
'Closes the TF session.
If the session was already closed, this is a no-op.'
| def close(self):
| if self._initialized:
self._model.close()
self._initialized = False
|
'When used as a context manager, initializes the TF session.'
| def __enter__(self):
| self.initialize()
return self
|
'When used as a context manager, closes the TF session.'
| def __exit__(self, *args):
| self.close()
|
'Generates a sequence from the model based on sequence and options.
Also initializes the TF graph if not yet initialized.
Args:
input_sequence: An input NoteSequence to base the generation on.
generator_options: A GeneratorOptions proto with options to use for
generation.
Returns:
The generated NoteSequence proto.'
| def generate(self, input_sequence, generator_options):
| self.initialize()
return self._generate(input_sequence, generator_options)
|
'Writes a generator_pb2.GeneratorBundle file in the specified location.
Saves the checkpoint, metagraph, and generator id in one file.
Args:
bundle_file: Location to write the bundle file.
bundle_description: A short, human-readable string description of this
bundle.
Raises:
SequenceGeneratorException: if there is an e... | def create_bundle_file(self, bundle_file, bundle_description=None):
| if (not bundle_file):
raise SequenceGeneratorException('Bundle file location not specified.')
if (not self.details.id):
raise SequenceGeneratorException('Generator id must be included in GeneratorDetails when creating a bundle file.')
if (not self... |
'Returns the input vector for the given position in the chord progression.
Indices [0, 36]:
[0]: Whether or not this chord is "no chord".
[1, 12]: A one-hot encoding of the chord root pitch class.
[13, 24]: Whether or not each pitch class is present in the chord.
[25, 36]: A one-hot encoding of the chord bass pitch cla... | def events_to_input(self, events, position):
| chord = events[position]
input_ = ([0.0] * self.input_size)
if (chord == NO_CHORD):
input_[0] = 1.0
return input_
root = chord_symbols_lib.chord_symbol_root(chord)
input_[(1 + root)] = 1.0
pitches = chord_symbols_lib.chord_symbol_pitches(chord)
for pitch in pitches:
i... |
'Initializes the MultiDrumOneHotEncoding.
Args:
drum_type_pitches: A Python list of the MIDI pitch values for each drum
type. If None, `DEFAULT_DRUM_TYPE_PITCHES` will be used.
ignore_unknown_drums: If True, unknown drum pitches will not be encoded.
If False, a DrumsEncodingException will be raised when unknown drum
pi... | def __init__(self, drum_type_pitches=None, ignore_unknown_drums=True):
| if (drum_type_pitches is None):
drum_type_pitches = DEFAULT_DRUM_TYPE_PITCHES
self._drum_map = dict(enumerate(drum_type_pitches))
self._inverse_drum_map = dict(((pitch, index) for (index, pitches) in self._drum_map.items() for pitch in pitches))
self._ignore_unknown_drums = ignore_unknown_drums
|
'Initializes a MelodyOneHotEncoding object.
Args:
min_note: The minimum midi pitch the encoded melody events can have.
max_note: The maximum midi pitch (exclusive) the encoded melody events
can have.
Raises:
ValueError: If `min_note` or `max_note` are outside the midi range, or if
`max_note` is not greater than `min_no... | def __init__(self, min_note, max_note):
| if (min_note < MIN_MIDI_PITCH):
raise ValueError(('min_note must be >= 0. min_note is %d.' % min_note))
if (max_note > (MAX_MIDI_PITCH + 1)):
raise ValueError(('max_note must be <= 128. max_note is %d.' % max_note))
if (max_note <= min_note):
... |
'Collapses a melody event value into a zero-based index range.
Args:
event: A Melody event value. -2 = no event, -1 = note-off event,
[0, 127] = note-on event for that midi pitch.
Returns:
An int in the range [0, self.num_classes). 0 = no event,
1 = note-off event, [2, self.num_classes) = note-on event for
that pitch r... | def encode_event(self, event):
| if (event < (- NUM_SPECIAL_MELODY_EVENTS)):
raise ValueError(('invalid melody event value: %d' % event))
if ((event >= 0) and (event < self._min_note)):
raise ValueError(('melody event less than min note: %d < %d' % (event, self._min_note)))
if (event >= s... |
'Expands a zero-based index value to its equivalent melody event value.
Args:
index: An int in the range [0, self._num_model_events).
0 = no event, 1 = note-off event,
[2, self._num_model_events) = note-on event for that pitch relative
to the [self._min_note, self._max_note) range.
Returns:
A Melody event value. -2 = n... | def decode_event(self, index):
| if (index < NUM_SPECIAL_MELODY_EVENTS):
return (index - NUM_SPECIAL_MELODY_EVENTS)
return ((index - NUM_SPECIAL_MELODY_EVENTS) + self._min_note)
|
'Initializes the KeyMelodyEncoderDecoder.
Args:
min_note: The minimum midi pitch the encoded melody events can have.
max_note: The maximum midi pitch (exclusive) the encoded melody events can
have.
lookback_distances: A list of step intervals to look back in history to
encode both the following event and whether the cu... | def __init__(self, min_note, max_note, lookback_distances=None, binary_counter_bits=7):
| self._lookback_distances = (lookback_distances if (lookback_distances is not None) else DEFAULT_LOOKBACK_DISTANCES)
self._binary_counter_bits = binary_counter_bits
self._min_note = min_note
self._note_range = (max_note - min_note)
|
'Returns the input vector for the given position in the melody.
Returns a self.input_size length list of floats. Assuming
self._min_note = 48, self._note_range = 36, two lookback distances, and
seven binary counters, then self.input_size = 74. Each index represents a
different input signal to the model.
Indices [0, 73]... | def events_to_input(self, events, position):
| current_note = None
is_attack = False
is_ascending = None
last_3_notes = collections.deque(maxlen=3)
sub_melody = melodies_lib.Melody(events[:(position + 1)])
for note in sub_melody:
if (note == MELODY_NO_EVENT):
is_attack = False
elif (note == MELODY_NOTE_OFF):
... |
'Returns the label for the given position in the melody.
Returns an int in the range [0, self.num_classes). Assuming
self._min_note = 48, self._note_range = 36, and two lookback distances,
then self.num_classes = 40.
Values [0, 39]:
[0, 35]: Note-on event for midi pitch [48, 84).
36: No event.
37: Note-off event.
38: R... | def events_to_label(self, events, position):
| if ((position < self._lookback_distances[(-1)]) and (events[position] == MELODY_NO_EVENT)):
return ((self._note_range + len(self._lookback_distances)) + 1)
for (i, lookback_distance) in reversed(list(enumerate(self._lookback_distances))):
lookback_position = (position - lookback_distance)
... |
'Returns the melody event for the given class index.
This is the reverse process of the self.events_to_label method.
Args:
class_index: An int in the range [0, self.num_classes).
events: The magenta.music.Melody events list of the current melody.
Returns:
A magenta.music.Melody event value.'
| def class_index_to_event(self, class_index, events):
| for (i, lookback_distance) in reversed(list(enumerate(self._lookback_distances))):
if (class_index == ((self._note_range + 2) + i)):
if (len(events) < lookback_distance):
return MELODY_NO_EVENT
return events[(- lookback_distance)]
if (class_index == (self._note_ra... |
'Compares MusicXMLDocument object against a sequence proto.
Args:
musicxml: A MusicXMLDocument object.
sequence_proto: A tensorflow.magenta.Sequence proto.'
| def checkmusicxmlandsequence(self, musicxml, sequence_proto):
| self.assertEqual(len(musicxml.get_time_signatures()), len(sequence_proto.time_signatures))
for (musicxml_time, sequence_time) in zip(musicxml.get_time_signatures(), sequence_proto.time_signatures):
self.assertEqual(musicxml_time.numerator, sequence_time.numerator)
self.assertEqual(musicxml_time.... |
'Test the translation from MusicXML to Sequence proto.'
| def checkmusicxmltosequence(self, filename):
| source_musicxml = musicxml_parser.MusicXMLDocument(filename)
sequence_proto = musicxml_reader.musicxml_to_sequence_proto(source_musicxml)
self.checkmusicxmlandsequence(source_musicxml, sequence_proto)
|
'Verify MusicXML scale file.
Verify that it contains the correct pitches (sounding pitch) and durations.
Args:
filename: file to test.
part_name: name of the part the sequence is expected to contain.'
| def checkFMajorScale(self, filename, part_name):
| expected_ns = common_testing_lib.parse_test_proto(music_pb2.NoteSequence, '\n ticks_per_quarter: 220\n source_info: {\n source_type: SCORE_BASED\n encoding_type: MU... |
'Test the simple flute scale MusicXML file.'
| def testsimplemusicxmltosequence(self):
| self.checkmusicxmltosequence(self.flute_scale_filename)
self.checkFMajorScale(self.flute_scale_filename, 'Flute')
|
'Test the complex band score MusicXML file.'
| def testcomplexmusicxmltosequence(self):
| self.checkmusicxmltosequence(self.band_score_filename)
|
'Test the translation from transposed MusicXML to Sequence proto.
Compare a transposed MusicXML file (clarinet) to an identical untransposed
sequence (flute).'
| def testtransposedxmltosequence(self):
| untransposed_musicxml = musicxml_parser.MusicXMLDocument(self.flute_scale_filename)
transposed_musicxml = musicxml_parser.MusicXMLDocument(self.clarinet_scale_filename)
untransposed_proto = musicxml_reader.musicxml_to_sequence_proto(untransposed_musicxml)
self.checkmusicxmlandsequence(transposed_musicxm... |
'Test an MXL file containing a unicode filename within its zip archive.'
| def testcompressedmxlunicodefilename(self):
| unicode_filename = os.path.join(tf.resource_loader.get_data_files_path(), 'testdata/unicode_filename.mxl')
sequence = musicxml_reader.musicxml_file_to_sequence_proto(unicode_filename)
self.assertEqual(len(sequence.notes), 8)
|
'Test the translation from compressed MusicXML to Sequence proto.
Compare a compressed MusicXML file to an identical uncompressed sequence.'
| def testcompressedxmltosequence(self):
| uncompressed_musicxml = musicxml_parser.MusicXMLDocument(self.flute_scale_filename)
compressed_musicxml = musicxml_parser.MusicXMLDocument(self.compressed_filename)
uncompressed_proto = musicxml_reader.musicxml_to_sequence_proto(uncompressed_musicxml)
self.checkmusicxmlandsequence(compressed_musicxml, u... |
'Test the translation from compressed MusicXML with multiple rootfiles.
The example MXL file contains a MusicXML file of the Flute F Major scale,
as well as the PNG rendering of the score contained within the single MXL
file.'
| def testmultiplecompressedxmltosequence(self):
| uncompressed_musicxml = musicxml_parser.MusicXMLDocument(self.flute_scale_filename)
compressed_musicxml = musicxml_parser.MusicXMLDocument(self.multiple_rootfile_compressed_filename)
uncompressed_proto = musicxml_reader.musicxml_to_sequence_proto(uncompressed_musicxml)
self.checkmusicxmlandsequence(comp... |
'Test the rhythm durations MusicXML file.'
| def testrhythmdurationsxmltosequence(self):
| self.checkmusicxmltosequence(self.rhythm_durations_filename)
|
'Verify properties of the flute scale.'
| def testFluteScale(self):
| ns = musicxml_reader.musicxml_file_to_sequence_proto(self.flute_scale_filename)
expected_ns = common_testing_lib.parse_test_proto(music_pb2.NoteSequence, '\n ticks_per_quarter: 220\n time_signatures: {\n n... |
'Test that transposition works when changing instrument transposition.
This can occur within a single part in a score where the score
has no key signature / is atonal. Examples include changing from a
non-transposing instrument to a transposing one (ex. Flute to Bb Clarinet)
or vice versa, or changing among transposing... | def test_atonal_transposition(self):
| ns = musicxml_reader.musicxml_file_to_sequence_proto(self.atonal_transposition_filename)
expected_ns = common_testing_lib.parse_test_proto(music_pb2.NoteSequence, '\n ticks_per_quarter: 220\n time_signatures: {\n ... |
'Test that incomplete measures have the correct time signature.
This can occur in pickup bars or incomplete measures. For example,
if the time signature in the MusicXML is 4/4, but the measure only
contains one quarter note, Magenta expects this pickup measure to have
a time signature of 1/4.'
| def test_incomplete_measures(self):
| ns = musicxml_reader.musicxml_file_to_sequence_proto(self.time_signature_filename)
self.assertEqual(len(ns.time_signatures), 6)
self.assertEqual(len(ns.key_signatures), 1)
self.assertEqual(len(ns.notes), 112)
|
'Test that time signatures are inserted for music without time signatures.
MusicXML does not require the use of time signatures. Music without
time signatures occur in medieval chant, cadenzas, and contemporary music.'
| def test_unmetered_music(self):
| ns = musicxml_reader.musicxml_file_to_sequence_proto(self.unmetered_filename)
expected_ns = common_testing_lib.parse_test_proto(music_pb2.NoteSequence, '\n ticks_per_quarter: 220\n time_signatures: {\n num... |
'Verify properties of the St. Anne file.
The file contains 2 parts and 4 voices.'
| def test_st_anne(self):
| ns = musicxml_reader.musicxml_file_to_sequence_proto(self.st_anne_filename)
expected_ns = common_testing_lib.parse_test_proto(music_pb2.NoteSequence, '\n ticks_per_quarter: 220\n time_signatures {\n numera... |
'Verify that a part with an empty name can be parsed.'
| def test_empty_part_name(self):
| xml = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n <!DOCTYPE score-partwise PUBLIC\n "-//Recordare//DTD MusicXML 3.0 Partwise//EN"\n "http://www.musicxml.org/dtds/partwise.dtd">\... |
'Verify that a part without a corresponding score-part can be parsed.'
| def test_empty_part_list(self):
| xml = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n <!DOCTYPE score-partwise PUBLIC\n "-//Recordare//DTD MusicXML 3.0 Partwise//EN"\n "http://www.musicxml.org/dtds/partwise.dtd">\... |
'Verify that an empty doc can be parsed.'
| def test_empty_doc(self):
| xml = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n <!DOCTYPE score-partwise PUBLIC\n "-//Recordare//DTD MusicXML 3.0 Partwise//EN"\n "http://www.musicxml.org/dtds/partwise.dtd">\... |
'Test that a whole measure rest can be encoded using <forward>.
A whole measure rest is usually encoded as a <note> with a duration
equal to that of a whole measure. An alternative encoding is to
use the <forward> element to advance the time cursor to a duration
equal to that of a whole measure. This implies a whole me... | def test_whole_measure_rest_forward(self):
| ns = musicxml_reader.musicxml_file_to_sequence_proto(self.whole_measure_rest_forward_filename)
expected_ns = common_testing_lib.parse_test_proto(music_pb2.NoteSequence, '\n ticks_per_quarter: 220\n time_signatures {\n ... |
'Test that meters are encoded properly.
Musical meters are expressed as a ratio of beats to divisions.
The MusicXML parser uses this ratio in lowest terms for timing
purposes. However, the meters should be in the actual terms
when appearing in a NoteSequence.'
| def test_meter(self):
| ns = musicxml_reader.musicxml_file_to_sequence_proto(self.meter_test_filename)
expected_ns = common_testing_lib.parse_test_proto(music_pb2.NoteSequence, '\n ticks_per_quarter: 220\n time_signatures {\n num... |
'Construct a ChordProgression.'
def __init__(self, events=None, **kwargs):
    """Construct a ChordProgression."""
    # This class fixes the pad event to NO_CHORD, so discard any
    # caller-supplied value before delegating to the base class.
    kwargs.pop('pad_event', None)
    super(ChordProgression, self).__init__(pad_event=NO_CHORD, events=events, **kwargs)
|
'Adds the given chord to the `events` list.
Each step from `start_step` through `end_step` - 1 is set to the given
chord. `events` is first resized (truncated or padded) so that the last
event has index `end_step` - 1.
Args:
def _add_chord(self, figure, start_step, end_step):
    """Adds the given chord to the `events` list.

    The sequence is resized so its last event has index `end_step` - 1,
    then every step in [`start_step`, `end_step`) is set to `figure`.

    Args:
      figure: Chord symbol figure. A string like "Cm9".
      start_step: First step of the chord (inclusive).
      end_step: Last step of the chord (exclusive).

    Raises:
      BadChordException: If `start_step` does not precede `end_step`.
    """
    if start_step >= end_step:
        raise BadChordException('Start step does not precede end step: start=%d, end=%d' % (start_step, end_step))
    self.set_length(end_step)
    # Fill the whole span with one slice assignment instead of a loop.
    self._events[start_step:end_step] = [figure] * (end_step - start_step)
|
'Populate self with the chords from the given quantized NoteSequence.
A chord progression is extracted from the given sequence starting at time
step `start_step` and ending at time step `end_step`.
The number of time steps per bar is computed from the time signature in
`quantized_sequence`.
Args:
quantized_sequence: A ... | def from_quantized_sequence(self, quantized_sequence, start_step, end_step):
| sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)
self._reset()
steps_per_bar_float = sequences_lib.steps_per_bar_in_quantized_sequence(quantized_sequence)
if ((steps_per_bar_float % 1) != 0):
raise events_lib.NonIntegerStepsPerBarException(('There are %f timesteps... |
'Converts the ChordProgression to NoteSequence proto.
This doesn\'t generate actual notes, but text annotations specifying the
chord changes when they occur.
Args:
sequence_start_time: A time in seconds (float) that the first chord in
the sequence will land on.
qpm: Quarter notes per minute (float).
Returns:
A NoteSequ... | def to_sequence(self, sequence_start_time=0.0, qpm=120.0):
| seconds_per_step = ((60.0 / qpm) / self.steps_per_quarter)
sequence = music_pb2.NoteSequence()
sequence.tempos.add().qpm = qpm
sequence.ticks_per_quarter = STANDARD_PPQ
current_figure = NO_CHORD
for (step, figure) in enumerate(self):
if (figure != current_figure):
current_fig... |
'Transpose chords in this ChordProgression.
Args:
transpose_amount: The number of half steps to transpose this
ChordProgression. Positive values transpose up. Negative values
transpose down.
Raises:
ChordSymbolException: If a chord (other than "no chord") fails to be
interpreted by the `chord_symbols_lib` module.'
def transpose(self, transpose_amount):
    """Transpose chords in this ChordProgression.

    Args:
      transpose_amount: The number of half steps to transpose this
        ChordProgression. Positive values transpose up. Negative values
        transpose down.

    Raises:
      ChordSymbolException: If a chord (other than "no chord") fails to be
        interpreted by the `chord_symbols_lib` module.
    """
    # Transposition is modulo an octave, so reduce the shift once up front.
    shift = transpose_amount % NOTES_PER_OCTAVE
    for index, figure in enumerate(self._events):
        if figure != NO_CHORD:
            self._events[index] = chord_symbols_lib.transpose_chord_symbol(figure, shift)
|
'Renders the chord symbols of a NoteSequence.
This function renders chord symbol annotations in a NoteSequence as actual
notes. Notes are added to the NoteSequence object, and the chord symbols
remain also.
Args:
sequence: The NoteSequence for which to render chord symbols.'
@abc.abstractmethod
def render(self, sequence):
    """Renders the chord symbols of a NoteSequence.

    This function renders chord symbol annotations in a NoteSequence as
    actual notes. Notes are added to the NoteSequence object, and the chord
    symbols remain also.

    Args:
      sequence: The NoteSequence for which to render chord symbols.
    """
    pass
|
'Initialize a BasicChordRenderer object.
Args:
velocity: The MIDI note velocity to use.
instrument: The MIDI instrument to use.
program: The MIDI program to use.
octave: The octave in which to render chord notes. If the bass note is not
otherwise part of the chord, it will not be rendered in this octave.
def __init__(self, velocity=100, instrument=1, program=88, octave=4, bass_octave=3):
    """Initialize a BasicChordRenderer object.

    Args:
      velocity: The MIDI note velocity to use.
      instrument: The MIDI instrument to use.
      program: The MIDI program to use.
      octave: The octave in which to render chord notes. If the bass note is
        not otherwise part of the chord, it will not be rendered in this
        octave.
      bass_octave: The octave used for the bass note (presumably; the
        original documentation is truncated here — confirm).
    """
    self._velocity = velocity
    self._instrument = instrument
    self._program = program
    self._octave = octave
    self._bass_octave = bass_octave
|
'Construct a LeadSheet.
If `melody` and `chords` are specified, instantiate with the provided
melody and chords. Otherwise, create an empty LeadSheet.
Args:
melody: A Melody object.
chords: A ChordProgression object.
Raises:
MelodyChordsMismatchException: If the melody and chord progression differ
def __init__(self, melody=None, chords=None):
    """Construct a LeadSheet.

    If `melody` and `chords` are specified, instantiate with the provided
    melody and chords. Otherwise, create an empty LeadSheet.

    Args:
      melody: A Melody object.
      chords: A ChordProgression object.

    Raises:
      MelodyChordsMismatchException: If exactly one of `melody` and `chords`
        is given, or if they differ in temporal resolution or position in
        the source sequence.
    """
    if (melody is None) != (chords is None):
        raise MelodyChordsMismatchException('melody and chords must be both specified or both unspecified')
    if melody is None:
        self._reset()
    else:
        self._from_melody_and_chords(melody, chords)
|
'Clear events and reset object state.'
def _reset(self):
    """Clear events and reset object state."""
    # Replace both component sequences with fresh, empty ones.
    self._melody = melodies_lib.Melody()
    self._chords = chords_lib.ChordProgression()
|
'Initializes a LeadSheet with a given melody and chords.
Args:
melody: A Melody object.
chords: A ChordProgression object.
Raises:
MelodyChordsMismatchException: If the melody and chord progression differ
in temporal resolution or position in the source sequence.'
| def _from_melody_and_chords(self, melody, chords):
| if ((len(melody) != len(chords)) or (melody.steps_per_bar != chords.steps_per_bar) or (melody.steps_per_quarter != chords.steps_per_quarter) or (melody.start_step != chords.start_step) or (melody.end_step != chords.end_step)):
raise MelodyChordsMismatchException()
self._melody = melody
self._chords ... |
'Return an iterator over (melody, chord) tuples in this LeadSheet.
Returns:
Python iterator over (melody, chord) event tuples.'
def __iter__(self):
    """Return an iterator over (melody, chord) tuples in this LeadSheet.

    Returns:
      Python iterator over (melody, chord) event tuples.
    """
    # `itertools.izip` exists only in Python 2; the built-in `zip` provides
    # the same lockstep pairing on both versions. `iter(...)` guarantees an
    # iterator is returned even on Python 2, where `zip` builds a list.
    return iter(zip(self._melody, self._chords))
|
'Returns the melody-chord tuple at the given index.'
def __getitem__(self, i):
    """Returns the melody-chord tuple at the given index."""
    return (self._melody[i], self._chords[i])
|
'Returns a LeadSheet object for the given slice range.'
def __getslice__(self, i, j):
    """Returns a LeadSheet object for the given slice range.

    NOTE: `__getslice__` is a Python 2-only protocol; Python 3 passes a
    slice object to `__getitem__` instead.
    """
    return LeadSheet(self._melody[i:j], self._chords[i:j])
|
'How many events (melody-chord tuples) are in this LeadSheet.
Returns:
Number of events as an integer.'
def __len__(self):
    """How many events (melody-chord tuples) are in this LeadSheet.

    Returns:
      Number of events as an integer.
    """
    # Melody and chords are kept the same length, so either one's
    # length is the lead sheet's length.
    return len(self._melody)
|
'Return the melody of the lead sheet.
Returns:
The lead sheet melody, a Melody object.'
@property
def melody(self):
    """Return the melody of the lead sheet.

    Returns:
      The lead sheet melody, a Melody object.
    """
    return self._melody
|
'Return the chord progression of the lead sheet.
Returns:
The lead sheet chords, a ChordProgression object.'
@property
def chords(self):
    """Return the chord progression of the lead sheet.

    Returns:
      The lead sheet chords, a ChordProgression object.
    """
    return self._chords
|
'Appends event to the end of the sequence and increments the end step.
Args:
event: The event (a melody-chord tuple) to append to the end.'
def append(self, event):
    """Appends event to the end of the sequence and increments the end step.

    Args:
      event: The event (a melody-chord tuple) to append to the end.
    """
    melody_event, chord_event = event
    # Keep the two parallel sequences in lockstep.
    for sequence, value in ((self._melody, melody_event), (self._chords, chord_event)):
        sequence.append(value)
|
'Converts the LeadSheet to NoteSequence proto.
Args:
velocity: Midi velocity to give each melody note. Between 1 and 127
(inclusive).
instrument: Midi instrument to give each melody note.
sequence_start_time: A time in seconds (float) that the first note (and
chord) in the sequence will land on.
qpm: Quarter notes per ... | def to_sequence(self, velocity=100, instrument=0, sequence_start_time=0.0, qpm=120.0):
| sequence = self._melody.to_sequence(velocity=velocity, instrument=instrument, sequence_start_time=sequence_start_time, qpm=qpm)
chord_sequence = self._chords.to_sequence(sequence_start_time=sequence_start_time, qpm=qpm)
for text_annotation in chord_sequence.text_annotations:
if (text_annotation.anno... |
'Transpose notes and chords in this LeadSheet.
All notes and chords are transposed the specified amount. Additionally,
all notes are octave shifted to lie within the [min_note, max_note) range.
Args:
transpose_amount: The number of half steps to transpose this
LeadSheet. Positive values transpose up. Negative values
def transpose(self, transpose_amount, min_note=0, max_note=128):
    """Transpose notes and chords in this LeadSheet.

    All notes and chords are transposed the specified amount. Additionally,
    all notes are octave shifted to lie within the [min_note, max_note)
    range.

    Args:
      transpose_amount: The number of half steps to transpose this
        LeadSheet. Positive values transpose up. Negative values
        transpose down.
      min_note: Minimum pitch (inclusive) for the resulting melody notes.
      max_note: Maximum pitch (exclusive) for the resulting melody notes.
    """
    # The two component sequences transpose independently of each other.
    self._chords.transpose(transpose_amount)
    self._melody.transpose(transpose_amount, min_note, max_note)
|
'Transpose and octave shift the notes and chords in this LeadSheet.
Args:
min_note: Minimum pitch (inclusive) that the resulting notes will take on.
max_note: Maximum pitch (exclusive) that the resulting notes will take on.
transpose_to_key: The lead sheet is transposed to be in this key.
Returns:
def squash(self, min_note, max_note, transpose_to_key):
    """Transpose and octave shift the notes and chords in this LeadSheet.

    Args:
      min_note: Minimum pitch (inclusive) that the resulting notes will take on.
      max_note: Maximum pitch (exclusive) that the resulting notes will take on.
      transpose_to_key: The lead sheet is transposed to be in this key.

    Returns:
      The transpose amount (in half steps) chosen by the melody squash,
      which is also applied to the chords.
    """
    amount = self._melody.squash(min_note, max_note, transpose_to_key)
    self._chords.transpose(amount)
    return amount
|
'Sets the length of the lead sheet to the specified number of steps.
Args:
steps: How many steps long the lead sheet should be.'
def set_length(self, steps):
    """Sets the length of the lead sheet to the specified number of steps.

    Args:
      steps: How many steps long the lead sheet should be.
    """
    # Resize melody and chords together so they stay the same length.
    for sequence in (self._melody, self._chords):
        sequence.set_length(steps)
|
'Increase the resolution of a LeadSheet.
Increases the resolution of a LeadSheet object by a factor of `k`. This
increases the resolution of the melody and chords separately, which uses
MELODY_NO_EVENT to extend each event in the melody, and simply repeats each
chord event `k` times.
Args:
def increase_resolution(self, k):
    """Increase the resolution of a LeadSheet.

    Increases the resolution of a LeadSheet object by a factor of `k`. This
    increases the resolution of the melody and chords separately, which uses
    MELODY_NO_EVENT to extend each event in the melody, and simply repeats
    each chord event `k` times.

    Args:
      k: An integer, the factor by which to increase the resolution.
    """
    for sequence in (self._melody, self._chords):
        sequence.increase_resolution(k)
|
'Constructs a BaseModel.'
def __init__(self):
    """Constructs a BaseModel."""
    # tf.Session handle; populated by the initialize_with_checkpoint*
    # methods and cleared by close().
    self._session = None
|
'Builds and returns the model graph for generation.
Will be called before restoring a checkpoint file.
Returns:
The tf.Graph object.'
@abc.abstractmethod
def _build_graph_for_generation(self):
    """Builds and returns the model graph for generation.

    Will be called before restoring a checkpoint file.

    Returns:
      The tf.Graph object.
    """
    pass
|
'Builds the TF graph given a checkpoint file.
Calls into _build_graph_for_generation, which must be implemented by the
subclass, before restoring the checkpoint.
Args:
checkpoint_file: The path to the checkpoint file that should be used.'
def initialize_with_checkpoint(self, checkpoint_file):
    """Builds the TF graph given a checkpoint file.

    Calls into _build_graph_for_generation, which must be implemented by
    the subclass, before restoring the checkpoint.

    Args:
      checkpoint_file: The path to the checkpoint file that should be used.
    """
    graph = self._build_graph_for_generation()
    with graph.as_default():
        tf.logging.info('Checkpoint used: %s', checkpoint_file)
        restorer = tf.train.Saver()
        self._session = tf.Session()
        restorer.restore(self._session, checkpoint_file)
|
'Builds the TF graph with a checkpoint and metagraph.
Args:
checkpoint_filename: The path to the checkpoint file that should be used.
metagraph_filename: The path to the metagraph file that should be used.'
def initialize_with_checkpoint_and_metagraph(self, checkpoint_filename, metagraph_filename):
    """Builds the TF graph with a checkpoint and metagraph.

    Args:
      checkpoint_filename: The path to the checkpoint file that should be used.
      metagraph_filename: The path to the metagraph file that should be used.
    """
    with tf.Graph().as_default():
        self._session = tf.Session()
        # The metagraph supplies the graph structure; the checkpoint
        # supplies the variable values.
        restorer = tf.train.import_meta_graph(metagraph_filename)
        restorer.restore(self._session, checkpoint_filename)
|
'Writes the checkpoint and metagraph.
Args:
checkpoint_filename: Path to the checkpoint file.'
def write_checkpoint_with_metagraph(self, checkpoint_filename):
    """Writes the checkpoint and metagraph.

    Args:
      checkpoint_filename: Path to the checkpoint file.
    """
    with self._session.graph.as_default():
        # Unsharded V1 checkpoints keep the on-disk layout simple.
        writer = tf.train.Saver(sharded=False, write_version=tf.train.SaverDef.V1)
        writer.save(self._session, checkpoint_filename, meta_graph_suffix='meta', write_meta_graph=True)
|
'Closes the TF session.'
def close(self):
    """Closes the TF session."""
    self._session.close()
    # Drop the reference so a closed session is never reused.
    self._session = None
|
'Appends event to the end of the sequence.
Args:
event: The event to append to the end.'
@abc.abstractmethod
def append(self, event):
    """Appends event to the end of the sequence.

    Args:
      event: The event to append to the end.
    """
    pass
|
'Sets the length of the sequence to the specified number of steps.
If the event sequence is not long enough, will pad to make the sequence
the specified length. If it is too long, it will be truncated to the
requested length.
Args:
steps: How many steps long the event sequence should be.
@abc.abstractmethod
def set_length(self, steps, from_left=False):
    """Sets the length of the sequence to the specified number of steps.

    If the event sequence is not long enough, will pad to make the sequence
    the specified length. If it is too long, it will be truncated to the
    requested length.

    Args:
      steps: How many steps long the event sequence should be.
      from_left: Whether to add/remove at the left end of the sequence
        rather than the right (presumed completion of the truncated
        original doc — confirm against implementations).
    """
    pass
|
'Returns the event at the given index.'
@abc.abstractmethod
def __getitem__(self, i):
    """Returns the event at the given index."""
    pass
|
'Returns an iterator over the events.'
@abc.abstractmethod
def __iter__(self):
    """Returns an iterator over the events."""
    pass
|
'How many events are in this EventSequence.
Returns:
Number of events as an integer.'
@abc.abstractmethod
def __len__(self):
    """How many events are in this EventSequence.

    Returns:
      Number of events as an integer.
    """
    pass
|
'Construct a SimpleEventSequence.
If `events` is specified, instantiate with the provided event list.
Otherwise, create an empty SimpleEventSequence.
Args:
pad_event: Event value to use when padding sequences.
events: List of events to instantiate with.
start_step: The integer starting step offset.
steps_per_bar: The n... | def __init__(self, pad_event, events=None, start_step=0, steps_per_bar=DEFAULT_STEPS_PER_BAR, steps_per_quarter=DEFAULT_STEPS_PER_QUARTER):
| self._pad_event = pad_event
if (events is not None):
self._from_event_list(events, start_step=start_step, steps_per_bar=steps_per_bar, steps_per_quarter=steps_per_quarter)
else:
self._events = []
self._steps_per_bar = steps_per_bar
self._steps_per_quarter = steps_per_quarter
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.