TensorFlow2 Basic Structure

jhson989 2023. 12. 28. 22:49
import pandas as pd
import numpy as np
import tensorflow as tf

############## Define training hyperparameters
lr = 5e-4          # learning rate
batch_size = 4096
num_epoch = 1000
decay = 1e-5       # L2 weight-decay coefficient (0 disables regularization)

############## Define the dataset
# Load the raw CSV (cp949-encoded); skiprows=1 skips the file's first row
all_df = pd.read_csv('./data/7th-data.csv', encoding='cp949', skiprows=1)

# Extract input features (all columns from index 7 onward)
data = all_df.iloc[:, 7:].to_numpy()
# Extract labels: map the string classes to integers
anomalyCategory = {
    'REAL': 1,
    'FALSE': 0,
}
label = all_df.iloc[:, 6:7]  # the label column (named 'FALSE' in this CSV)
label['FALSE'] = label['FALSE'].apply(lambda x: anomalyCategory[x])
label = label.to_numpy()
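# Optional sanity check: anomaly datasets are often imbalanced, so it is worth
# inspecting the class counts before training.
print(np.unique(label, return_counts=True))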
############## train_test_split
from sklearn.model_selection import train_test_split
dataTrain, dataTest, labelTrain, labelTest = train_test_split(data, label, test_size=0.2, random_state=0)
## Data normalization: statistics come from the training set only, so no
## test-set information leaks into preprocessing
trainMean = dataTrain.mean()
trainStd = dataTrain.std()
dataTrain = (dataTrain - trainMean) / trainStd
dataTest = (dataTest - trainMean) / trainStd
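# Note: mean()/std() above reduce over every element, giving one global scalar.
# If per-feature standardization is intended instead (the more common choice),
# a minimal change would be:
#   trainMean = dataTrain.mean(axis=0)
#   trainStd = dataTrain.std(axis=0)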
## One-hot encode the integer labels, e.g. 1 -> [0., 1.]
labelAnomalyOneHotTrain = tf.one_hot(labelTrain[:, 0], depth=2)
labelAnomalyOneHotTest = tf.one_hot(labelTest[:, 0], depth=2)
## Build the tf.data input pipelines; only the training set needs shuffling
datasetAnomalyTrain = tf.data.Dataset.from_tensor_slices((dataTrain, labelAnomalyOneHotTrain)).shuffle(10000).batch(batch_size)
datasetAnomalyTest = tf.data.Dataset.from_tensor_slices((dataTest, labelAnomalyOneHotTest)).batch(batch_size)
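# A quick optional sanity check: pull one batch and confirm its shapes before
# training (expected: (batch_size, num_features) and (batch_size, 2)).
for x, y in datasetAnomalyTrain.take(1):
    print(x.shape, y.shape)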

############## Define the model

from tensorflow.keras.regularizers import l2

def kernel_reg():
    # L2 weight decay on Dense kernels; returns None when decay == 0
    return l2(decay) if decay != 0 else None

class FeatureExtractor(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.extractor1 = tf.keras.Sequential([
            tf.keras.layers.Dense(64, activation='gelu', kernel_regularizer=kernel_reg()),
            tf.keras.layers.Dense(512, activation='gelu', kernel_regularizer=kernel_reg()),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(256, activation='gelu', kernel_regularizer=kernel_reg()),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(128, activation='gelu', kernel_regularizer=kernel_reg()),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(128, activation='gelu', kernel_regularizer=kernel_reg()),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(64, activation='gelu', kernel_regularizer=kernel_reg()),
            tf.keras.layers.BatchNormalization(),
        ])

    def call(self, x, training=False):
        # Pass training explicitly so Dropout/BatchNormalization switch modes
        return self.extractor1(x, training=training)

class Classifier(tf.keras.Model):
    def __init__(self, featureExtractor, numCategories):
        super().__init__()
        self.classifier = tf.keras.Sequential([
            featureExtractor,
            tf.keras.layers.Dense(64, activation='gelu', kernel_regularizer=kernel_reg()),
            tf.keras.layers.Dense(numCategories, activation='softmax'),
        ])

    def call(self, feature, training=False):
        return self.classifier(feature, training=training)

############## Define classifierAnomaly (2 output classes: REAL / FALSE)
classifierAnomaly = Classifier(FeatureExtractor(), 2)
classifierAnomaly.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(learning_rate=lr), metrics=['accuracy'])
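# Optional sketch: subclassed Keras models are built lazily, so call build()
# with the input shape first if you want summary() to report parameter counts
# before training.
classifierAnomaly.build(input_shape=(None, dataTrain.shape[1]))
classifierAnomaly.summary()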

print("\t[Train] Anomaly")
anomalyTrainHistory = classifierAnomaly.fit(datasetAnomalyTrain, validation_data=datasetAnomalyTest, verbose=2, epochs=num_epoch)
print("\n[Test]")
print("\tAbnomaly accuracy: %.4f" % classifierAnomaly.evaluate(datasetAnomalyTest, verbose=0, return_dict=True)["accuracy"])