6 changes: 6 additions & 0 deletions .gitignore
@@ -1,2 +1,8 @@
 *.pyc
 .DS_store
+.eggs
+build
+deps
+dist
+fiddler.egg-info
+models
5 changes: 3 additions & 2 deletions src/cli.py
@@ -21,19 +21,20 @@ def main():
 @click.option("--learning-rate", "-r", default=1e-3, type=click.FLOAT, help="Learning Rate")
 @click.option("--num-steps", "-n", type=click.INT, default=15, help="No. of time steps in RNN")
 @click.option("--cell-size", "-s", type=click.INT, default=100, help="Dimension of cell states")
+@click.option("--dropout", "-d", type=click.FLOAT, help="Dropout probability for the output")
 @click.option("--epochs", "-e", type=click.INT,
               help="No. of epochs to run training for")
 @click.option("--cell", "-c", type=click.Choice(['lstm', 'gru']),
               default="lstm", help="Type of cell used in RNN")
 @click.option("--test-seed", "-t", help="Seed input for printing predicted text after each training step")
 @click.option("--delim/--no-delim", default=True, help="Delimit tunes with start and end symbol")
 def train_rnn(file, batch_size, layers, learning_rate,
-              num_steps, cell_size, epochs, cell, test_seed, delim):
+              num_steps, cell_size, dropout, epochs, cell, test_seed, delim):
     """ Train neural network """
     ds = Dataset(file, batch_size=batch_size,
                  num_steps=num_steps, with_delim=delim)
     n = RNN(data=ds, cell=cell, num_layers=layers,
-            learning_rate=learning_rate, cell_size=cell_size, num_epochs=epochs)
+            learning_rate=learning_rate, cell_size=cell_size, dropout=dropout, num_epochs=epochs)
     n.train(test_output=True, test_seed=test_seed, with_delim=delim)
 
 
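The new --dropout flag is declared without a default, so click passes None into train_rnn when the flag is omitted. A minimal, self-contained sketch of that behaviour (the command name `demo` and the echoed output are illustrative, not part of this PR):

    # Illustrative only: a click.FLOAT option declared without a default
    # arrives as None when the flag is omitted, which is what lets
    # `if self.dropout:` in network.py skip the dropout wrapper entirely.
    import click

    @click.command()
    @click.option("--dropout", "-d", type=click.FLOAT,
                  help="Dropout probability for the output")
    def demo(dropout):
        # None if -d/--dropout was not given, a float such as 0.3 otherwise
        click.echo("dropout = {!r}".format(dropout))

    if __name__ == "__main__":
        demo()
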
11 changes: 10 additions & 1 deletion src/network.py
@@ -6,7 +6,7 @@
 
 class RNN(object):
 
-    def __init__(self, data, cell, cell_size, num_layers=2, num_epochs=50, learning_rate=1e-3):
+    def __init__(self, data, cell, cell_size, dropout=0.2, num_layers=2, num_epochs=50, learning_rate=1e-3):
         """
         `data` is dataset.Dataset object
         `cell_size` is the Dimensions for each RNN cell's parameters (i.e. c and h)
@@ -18,6 +18,7 @@ def __init__(self, data, cell, cell_size, num_layers=2, num_epochs=50, learning_
         self.batch_size = self.data.batch_size
         self.cell = cell
         self.cell_size = cell_size
+        self.dropout = dropout
         self.num_layers = num_layers
         self.num_steps = self.data.num_steps
         self.num_classes = self.data.vocab_size
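
Note that the constructor's dropout=0.2 default only takes effect when the argument is omitted entirely; src/cli.py above always forwards the CLI value, so leaving out --dropout passes None and no dropout is applied. A sketch of calling the updated constructor directly (the import paths and the data file name are assumptions, since the module imports are not shown in this diff):

    # Sketch of using the new `dropout` parameter directly; module paths and
    # the training-data file name are assumed, not taken from this PR.
    from dataset import Dataset   # assumed module name
    from network import RNN       # assumed module name

    ds = Dataset("tunes.txt", batch_size=32, num_steps=15, with_delim=True)

    # dropout=0.5 wraps the RNN cells in _build(); dropout=None (what the CLI
    # passes when -d is absent) disables it; omitting the argument uses 0.2.
    net = RNN(data=ds, cell="lstm", cell_size=100, num_layers=2,
              dropout=0.5, num_epochs=50)
    net.train(test_output=True, test_seed=None, with_delim=True)
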
@@ -58,12 +59,20 @@ def _build(self):
                                 for i in range(self.num_layers)])
             single_cell = tf.nn.rnn_cell.LSTMCell(
                 self.cell_size, forget_bias=1.0)
+            """Use dropout only for training"""
+            if self.dropout:
+                single_cell = tf.contrib.rnn.DropoutWrapper(
+                    single_cell, output_keep_prob=self.dropout)
             multi_cell = tf.nn.rnn_cell.MultiRNNCell([single_cell for _ in xrange(self.num_layers)],
                                                      state_is_tuple=True)
         else:
             rnn_states = tuple([state_per_layer[i]
                                 for i in range(self.num_layers)])
             single_cell = tf.nn.rnn_cell.GRUCell(self.cell_size)
+            """Use dropout only for training"""
+            if self.dropout:
+                single_cell = tf.contrib.rnn.DropoutWrapper(
+                    single_cell, output_keep_prob=self.dropout)
             multi_cell = tf.nn.rnn_cell.MultiRNNCell(
                 [single_cell] * self.num_layers)

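DropoutWrapper's output_keep_prob argument is a keep probability (the probability of retaining an output unit), not a drop probability, so a value described as a "dropout probability for the output" would conventionally be passed as 1.0 - p. A minimal TF 1.x sketch of the wrapping pattern with that mapping made explicit (the layer count, cell size and dropout value are illustrative):

    # Standalone TF 1.x sketch of wrapping RNN cells with output dropout;
    # values are illustrative and not taken from this PR.
    import tensorflow as tf

    num_layers = 2
    cell_size = 100
    dropout = 0.3  # probability of dropping an output unit

    def make_cell():
        cell = tf.nn.rnn_cell.LSTMCell(cell_size, forget_bias=1.0)
        # DropoutWrapper takes a *keep* probability, so a dropout
        # probability p maps to output_keep_prob = 1.0 - p.
        return tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=1.0 - dropout)

    # One fresh cell object per layer, so each layer has its own parameters.
    multi_cell = tf.nn.rnn_cell.MultiRNNCell(
        [make_cell() for _ in range(num_layers)], state_is_tuple=True)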