Personal copywriter
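
The script below trains a character-level LSTM on a text corpus (input.txt) and then generates new copy in the same style, one character at a time.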

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout
from tensorflow.keras.callbacks import ModelCheckpoint

# Define hyperparameters
SEQ_LENGTH = 50
BATCH_SIZE = 128
EPOCHS = 50
DROPOUT_RATE = 0.2

# Load and preprocess the input text
with open("input.txt", "r", encoding="utf-8") as f:
    text = f.read().lower()
chars = sorted(set(text))
char_to_int = {c: i for i, c in enumerate(chars)}
n_chars = len(text)
n_vocab = len(chars)

# One-hot encode overlapping character windows: each sample is SEQ_LENGTH
# characters, and the target is the character that follows the window
X = np.zeros((n_chars - SEQ_LENGTH, SEQ_LENGTH, n_vocab), dtype=bool)
y = np.zeros((n_chars - SEQ_LENGTH, n_vocab), dtype=bool)
for i in range(n_chars - SEQ_LENGTH):
    for j in range(SEQ_LENGTH):
        X[i, j, char_to_int[text[i + j]]] = 1
    y[i, char_to_int[text[i + SEQ_LENGTH]]] = 1
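
# Optional sanity check: confirm the encoded tensor shapes before training,
# e.g. a 100,000-character corpus with a 40-character vocabulary yields
# X of shape (99950, 50, 40)
print(f"corpus: {n_chars} chars, vocab: {n_vocab}, X: {X.shape}, y: {y.shape}")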

# Define the RNN model: two stacked LSTM layers with dropout, followed by a
# softmax over the vocabulary
model = Sequential()
model.add(LSTM(256, input_shape=(SEQ_LENGTH, n_vocab), return_sequences=True))
model.add(Dropout(DROPOUT_RATE))
model.add(LSTM(256))
model.add(Dropout(DROPOUT_RATE))
model.add(Dense(n_vocab, activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam")
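
# Optional: inspect the layer shapes and parameter counts
model.summary()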

# Train the model, checkpointing the weights whenever training loss improves
filepath = "weights-improvement-{epoch:02d}-{loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor="loss", verbose=1, save_best_only=True, mode="min")
callbacks_list = [checkpoint]
model.fit(X, y, batch_size=BATCH_SIZE, epochs=EPOCHS, callbacks=callbacks_list)
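
# To resume from the best checkpoint later (the exact filename depends on the
# epoch and loss values printed during your run), Keras can reload the saved
# weights, e.g.:
#   model.load_weights("weights-improvement-50-1.2345.hdf5")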

# Generate text from the trained model: seed with a random slice of the
# corpus, then repeatedly predict the next character and slide the window
start_index = np.random.randint(0, n_chars - SEQ_LENGTH - 1)
generated_text = ""
sentence = text[start_index:start_index + SEQ_LENGTH]
generated_text += sentence
for i in range(500):
    # One-hot encode the current window
    x = np.zeros((1, SEQ_LENGTH, n_vocab))
    for j, char in enumerate(sentence):
        x[0, j, char_to_int[char]] = 1
    # Greedy decoding: always take the most likely next character
    prediction = model.predict(x, verbose=0)[0]
    next_index = np.argmax(prediction)
    next_char = chars[next_index]
    generated_text += next_char
    sentence = sentence[1:] + next_char

print(generated_text)
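
Greedy argmax decoding tends to loop on the most common character patterns. A common refinement, sketched below as an optional drop-in replacement for the np.argmax line (it is not part of the original script), is to sample from the softmax distribution with a temperature parameter:

def sample(prediction, temperature=1.0):
    """Sample a character index from the softmax output.

    temperature < 1.0 sharpens the distribution (closer to argmax);
    temperature > 1.0 flattens it (more diverse, riskier output).
    """
    prediction = np.asarray(prediction).astype("float64")
    # Rescale log-probabilities by the temperature, then renormalize
    log_probs = np.log(prediction + 1e-8) / temperature
    exp_probs = np.exp(log_probs)
    probs = exp_probs / np.sum(exp_probs)
    return np.random.choice(len(probs), p=probs)

# Usage inside the generation loop, replacing the argmax line:
#   next_index = sample(prediction, temperature=0.5)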