Change Flatten layer, loss function and add Input

coolneng 2021-06-26 17:52:20 +02:00
parent 4d67bdac30
commit e07d0dcdbf
Signed by: coolneng
GPG Key ID: 9893DA236405AF57
3 changed files with 24 additions and 45 deletions

View File

@@ -3,7 +3,7 @@ TRAIN_DATASET = "data/train_data.tfrecords"
 TEST_DATASET = "data/test_data.tfrecords"
 EVAL_DATASET = "data/eval_data.tfrecords"
 EPOCHS = 1000
-BATCH_SIZE = 256
+BATCH_SIZE = 1
 LEARNING_RATE = 0.004
 L2 = 0.001
 LOG_DIR = "logs"

View File

@@ -1,8 +1,9 @@
 from random import seed
-from tensorflow.keras import Model, Sequential, layers
+from tensorflow.keras import Model, Sequential
+from tensorflow.keras.layers import *
 from tensorflow.keras.callbacks import TensorBoard
-from tensorflow.keras.losses import sparse_categorical_crossentropy
+from tensorflow.keras.losses import categorical_crossentropy
 from tensorflow.keras.optimizers import Adam
 from tensorflow.keras.regularizers import l2
 from tensorflow.random import set_seed
@@ -15,47 +16,28 @@ def build_model() -> Model:
     """
     Build the CNN model
     """
-    model = Sequential()
-    model.add(
-        layers.Conv1D(
-            filters=16,
-            kernel_size=5,
-            activation="relu",
-            kernel_regularizer=l2(L2),
-        )
-    )
-    model.add(layers.MaxPool1D(pool_size=3, strides=1))
-    model.add(
-        layers.Conv1D(
-            filters=16,
-            kernel_size=3,
-            activation="relu",
-            kernel_regularizer=l2(L2),
-        )
-    )
-    model.add(layers.MaxPool1D(pool_size=3, strides=1))
-    model.add(layers.Flatten())
-    model.add(
-        layers.Dense(
-            units=16,
-            activation="relu",
-            kernel_regularizer=l2(L2),
-        )
-    )
-    model.add(layers.Dropout(rate=0.3))
-    model.add(
-        layers.Dense(
-            units=16,
-            activation="relu",
-            kernel_regularizer=l2(L2),
-        )
-    )
-    model.add(layers.Dropout(rate=0.3))
-    # FIXME Change output size
-    model.add(layers.Dense(units=len(BASES), activation="softmax"))
+    model = Sequential(
+        [
+            Input(shape=(None, len(BASES))),
+            Conv1D(
+                filters=16, kernel_size=5, activation="relu", kernel_regularizer=l2(L2)
+            ),
+            MaxPool1D(pool_size=3, strides=1),
+            Conv1D(
+                filters=16, kernel_size=3, activation="relu", kernel_regularizer=l2(L2)
+            ),
+            MaxPool1D(pool_size=3, strides=1),
+            GlobalAveragePooling1D(),
+            Dense(units=16, activation="relu", kernel_regularizer=l2(L2)),
+            Dropout(rate=0.3),
+            Dense(units=16, activation="relu", kernel_regularizer=l2(L2)),
+            Dropout(rate=0.3),
+            Dense(units=len(BASES), activation="softmax"),
+        ]
+    )
     model.compile(
         optimizer=Adam(LEARNING_RATE),
-        loss=sparse_categorical_crossentropy,
+        loss=categorical_crossentropy,
         metrics=["accuracy"],
     )
     return model
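
Replacing Flatten with GlobalAveragePooling1D, together with Input(shape=(None, len(BASES))), means the network no longer fixes the sequence length at build time: the pooling layer averages over however many timesteps arrive and always emits a fixed-size vector. A standalone sketch of the idea (assuming four bases; a trimmed-down variant of the model above, not the repository's exact code):

from numpy import zeros
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv1D, Dense, GlobalAveragePooling1D, Input

BASES = "ACGT"  # assumption: one one-hot channel per nucleotide
model = Sequential(
    [
        Input(shape=(None, len(BASES))),  # None = any sequence length
        Conv1D(filters=16, kernel_size=5, activation="relu"),
        GlobalAveragePooling1D(),         # (batch, steps, 16) -> (batch, 16)
        Dense(units=len(BASES), activation="softmax"),
    ]
)

print(model(zeros((1, 120, 4), dtype="float32")).shape)  # (1, 4)
print(model(zeros((1, 87, 4), dtype="float32")).shape)   # (1, 4): another length also works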

View File

@@ -50,10 +50,7 @@ def read_fastq(data_file, label_file) -> List[bytes]:
     examples = []
     with open(data_file) as data, open(label_file) as labels:
         for element, label in zip(parse(data, "fastq"), parse(labels, "fastq")):
-            example = generate_example(
-                sequence=str(element.seq),
-                label=str(label.seq),
-            )
+            example = generate_example(sequence=str(element.seq), label=str(label.seq))
             examples.append(example)
     return examples
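
Switching the loss from sparse_categorical_crossentropy to categorical_crossentropy implies that the labels generated from these FASTQ records end up one-hot encoded rather than stored as integer class indices (an inference from Keras loss semantics, not something this diff shows). Both conventions give the same value when they describe the same target:

from numpy import array
from tensorflow.keras.losses import (
    categorical_crossentropy,
    sparse_categorical_crossentropy,
)

probs = array([[0.7, 0.1, 0.1, 0.1]])         # model output over the 4 bases
sparse_label = array([0])                      # integer index, old loss
onehot_label = array([[1.0, 0.0, 0.0, 0.0]])   # one-hot vector, new loss

print(sparse_categorical_crossentropy(sparse_label, probs).numpy())  # [0.3567] = -log(0.7)
print(categorical_crossentropy(onehot_label, probs).numpy())         # [0.3567], same value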