L2, L1 norm regularization

Weight decay restricts how far the weights can move during training: an L1/L2 penalty on the weights (or the layer's activations) is added to the loss, so large weights are discouraged.

# Example applying weight decay (L1/L2 regularization) to the whole model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense
from tensorflow.keras.constraints import MaxNorm
from tensorflow.keras import regularizers

# Define the model.
model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(64, 
            kernel_regularizer=regularizers.l2(0.01),    # L2 norm regularization
            activity_regularizer=regularizers.l1(0.01)), # L1 norm regularization    
    Dense(10, activation='softmax')
])
# Configure the optimizer, loss, and metrics.
model.compile(optimizer='adam'
             , loss='sparse_categorical_crossentropy'
             , metrics=['accuracy'])
model.summary()

model.fit(X_train, y_train, batch_size=30, epochs=1, verbose=1, 
          validation_data=(X_test,y_test))
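The kernel_regularizer and activity_regularizer penalties are added to the loss that the optimizer minimizes. A minimal sketch of inspecting them on the model built above, via Keras' model.losses list:

import tensorflow as tf

# The L2 kernel penalty is present once the layer has weights; the L1 activity
# penalty is added when the model is called on an input.
_ = model(tf.zeros((1, 28, 28)))   # dummy input just to trigger the activity regularizer
print(model.losses)                # regularization loss tensors that get added to the training loss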

Constraints

A constraint physically caps the size of the weights.

A function is applied to the weights themselves: whenever a weight vector grows past the limit, it is rescaled back down to that limit, so it simply cannot get any larger.

# Define the model, now with a MaxNorm constraint on the kernel.
model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(64, 
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01),
            kernel_constraint=MaxNorm(2.)),             ## add constraints
    Dense(10, activation='softmax')
])
# Configure the optimizer, loss, and metrics.
model.compile(optimizer='adam'
             , loss='sparse_categorical_crossentropy'
             , metrics=['accuracy'])
model.summary()

model.fit(X_train, y_train, batch_size=30, epochs=1, verbose=1, 
          validation_data=(X_test,y_test))
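A minimal sketch of what MaxNorm(2.) actually does (w here is a hypothetical tensor, not part of the model above): the constraint object is callable and rescales each column of the kernel so its L2 norm is at most 2.

import tensorflow as tf
from tensorflow.keras.constraints import MaxNorm

w = tf.constant([[3.0], [4.0]])      # one column with L2 norm 5.0
w_clipped = MaxNorm(2.)(w)           # rescaled so the column norm is capped at 2
print(tf.norm(w_clipped, axis=0))    # ~[2.0]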

Dropout

Dropout randomly zeroes a fraction of the previous layer's outputs at each training step (and rescales the survivors), which keeps units from co-adapting; at inference time the layer does nothing.

from tensorflow.keras.layers import Dropout
from tensorflow.keras.constraints import MaxNorm
from tensorflow.keras import regularizers
# Define the model, now with a Dropout layer after the Dense layer.
model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(64, 
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01),
            kernel_constraint=MaxNorm(2.)),             
    Dropout(0.5),                                       ## add dropout
    Dense(10, activation='softmax')
])
# Configure the optimizer, loss, and metrics.
model.compile(optimizer='adam'
             , loss='sparse_categorical_crossentropy'
             , metrics=['accuracy'])
model.summary()

model.fit(X_train, y_train, batch_size=30, epochs=1, verbose=1, 
          validation_data=(X_test,y_test))
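A minimal sketch (x is a hypothetical input, not from the code above) showing that the Dropout layer is only active while training:

import tensorflow as tf
from tensorflow.keras.layers import Dropout

x = tf.ones((1, 4))
drop = Dropout(0.5)
print(drop(x, training=True))    # roughly half the entries zeroed, the rest scaled by 1/(1-0.5)
print(drop(x, training=False))   # unchanged: dropout is a no-op at inference time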

Learning rate decay

Instead of keeping the learning rate fixed, it can be lowered over the course of training; in Keras you either set the optimizer's learning_rate directly or pass it a learning-rate schedule.

# Default signature of the Adam optimizer; the learning rate is set (or scheduled) here
import tensorflow as tf

tf.keras.optimizers.Adam(
    learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False,
    name='Adam'
)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.89)
             , loss='sparse_categorical_crossentropy'
             , metrics=['accuracy'])
model.summary()

model.fit(X_train, y_train, batch_size=30, epochs=1, verbose=1, 
          validation_data=(X_test,y_test))
# Exponential decay: the learning rate is multiplied by 0.9 every 10,000 steps
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=1e-2,
    decay_steps=10000,
    decay_rate=0.9)

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule)
             , loss='sparse_categorical_crossentropy'
             , metrics=['accuracy'])
model.summary()

model.fit(X_train, y_train, batch_size=30, epochs=1, verbose=1, 
          validation_data=(X_test,y_test))
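Schedules are callable on the global step, so you can check the decayed value directly; a rough sketch using the lr_schedule defined above:

print(lr_schedule(0).numpy())        # 0.01
print(lr_schedule(10000).numpy())    # 0.01 * 0.9 = 0.009
print(lr_schedule(20000).numpy())    # 0.01 * 0.9**2 = 0.0081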
# The formula cosine decay applies (alpha = fraction of the initial rate to decay down to)
import math

def decayed_learning_rate(step, initial_learning_rate, decay_steps, alpha=0.0):
  step = min(step, decay_steps)
  cosine_decay = 0.5 * (1 + math.cos(math.pi * step / decay_steps))
  decayed = (1 - alpha) * cosine_decay + alpha
  return initial_learning_rate * decayed
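The same schedule is provided by Keras as a ready-made class; a minimal sketch, assuming TF 2.x (in older versions it lives under tf.keras.experimental.CosineDecay):

cosine_schedule = tf.keras.optimizers.schedules.CosineDecay(
    initial_learning_rate=0.01,
    decay_steps=10000)
print(cosine_schedule(0).numpy())        # 0.01
print(cosine_schedule(10000).numpy())    # ~0.0, since alpha defaults to 0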

# Cosine decay with warm restarts: the rate follows a cosine curve down towards 0,
# then restarts near the initial rate (the first restart happens after first_decay_steps steps)
first_decay_steps = 1000
initial_learning_rate = 0.01
lr_decayed_fn = (
  tf.keras.optimizers.schedules.CosineDecayRestarts(
      initial_learning_rate,
      first_decay_steps))

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr_decayed_fn)
             , loss='sparse_categorical_crossentropy'
             , metrics=['accuracy'])
model.summary()

model.fit(X_train, y_train, batch_size=30, epochs=1, verbose=1, 
          validation_data=(X_test,y_test))
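A rough sanity check (values are approximate) that the schedule really restarts, using the lr_decayed_fn defined above:

print(lr_decayed_fn(0).numpy())      # 0.01 at the start
print(lr_decayed_fn(999).numpy())    # near 0 at the end of the first cosine period
print(lr_decayed_fn(1500).numpy())   # back up after the restart around step 1000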