import os
# NOTE: the backend env var MUST be set before `import keras` — Keras reads
# KERAS_BACKEND at import time, so this line cannot move below line 3.
os.environ["KERAS_BACKEND"] = "jax"
import keras
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# ----- JAX tuning (keep 64-bit arithmetic disabled; float32 is faster) -----
from jax import config
config.update("jax_enable_x64", False)
# ----- Font setup (optional; needed so Japanese plot labels render) -----
plt.rcParams['font.family'] = 'Noto Sans CJK JP'
# ----- Load dataset -----
archive = np.load("data_label.npz")
features = archive["data"]    # per original comment: (N, 200, 5) — confirm against writer
targets = archive["label"]    # per original comment: (N, 6)

# Hold out 30% for testing; fixed seed keeps the split reproducible.
x_train, x_test, y_train, y_test = train_test_split(
    features,
    targets,
    test_size=0.3,
    random_state=0,
)

# ----- Target scaling (compensates for the magnitude gap between L and C elements) -----
# Fit on train only; apply the same transform to test to avoid leakage.
scaler_y = StandardScaler()
y_train_f = scaler_y.fit_transform(y_train)
y_test_f = scaler_y.transform(y_test)
# ----- Model: lightweight 2-stack LSTM regressor -----
seq_len = x_train.shape[1]      # presumably 200 time steps — confirm against data file
n_features = x_train.shape[2]   # presumably 5 channels

inputs = keras.Input(shape=(seq_len, n_features))
hidden = keras.layers.LSTM(64, return_sequences=True)(inputs)
hidden = keras.layers.LSTM(48, return_sequences=False)(hidden)
# Light dropout: small generalization gain at negligible speed cost.
hidden = keras.layers.Dropout(0.05)(hidden)
outputs = keras.layers.Dense(6)(hidden)  # one output per circuit element

model = keras.Model(inputs, outputs)
model.compile(
    loss='mse',
    optimizer=keras.optimizers.Adam(learning_rate=0.001),
)
# ----- Callbacks: faster convergence, no wasted epochs -----
# Stop once val_loss stalls for 25 epochs and roll back to the best weights.
early_stop = keras.callbacks.EarlyStopping(
    monitor="val_loss",
    patience=25,
    restore_best_weights=True,
)
# Halve the learning rate after 10 stalled epochs, down to a 1e-6 floor.
lr_schedule = keras.callbacks.ReduceLROnPlateau(
    monitor="val_loss",
    patience=10,
    factor=0.5,
    min_lr=1e-6,
)
callbacks = [early_stop, lr_schedule]
# ----- Training (batch_size=128 keeps JAX throughput high) -----
# Targets are the scaled labels; 15% of the training split is carved off
# for validation, which drives both callbacks above.
history = model.fit(
    x_train,
    y_train_f,
    validation_split=0.15,
    epochs=300,
    batch_size=128,
    callbacks=callbacks,
    verbose=1,
)
# ----- Predict, then undo the target scaling -----
y_pred = scaler_y.inverse_transform(model.predict(x_test))

# ==========================
# R2 evaluation (on the original, unscaled target scale)
# ==========================
r2 = keras.metrics.R2Score()
r2.update_state(y_test, y_pred)
print("\nR2 Score:", r2.result())
# ==========================
# Per-element percent error (with a division-by-zero guard)
# ==========================
# Rows whose true value is ~0 are skipped and left at 0% error.
# NOTE(review): those zeroed entries still participate in the column mean
# below, which biases it low if many targets sit near zero — confirm that
# is acceptable for this dataset.
eps = 1e-12
valid_mask = np.abs(y_test) > eps
pct_error = np.zeros_like(y_test)
true_vals = y_test[valid_mask]
pct_error[valid_mask] = np.abs((true_vals - y_pred[valid_mask]) / true_vals) * 100
print("\n% Error per element (mean):", pct_error.mean(axis=0))
# ==========================
# Correlation plots: predicted vs. true value for each circuit element
# ==========================
row, column = 2, 3
legend = ["L1", "C1", "L2", "C2", "L3", "C3"]
mean_pct = pct_error.mean(axis=0)  # hoisted: was recomputed in every subplot
fig, ax = plt.subplots(row, column, figsize=(15, 9))  # use row/column, not repeated literals
for i in range(row):
    for j in range(column):
        count = column * i + j
        # BUG FIX: the axis limit previously came from y_pred alone, so any
        # true value larger than the largest prediction was clipped out of
        # view. Take the max over both series instead.
        maxvalue = max(y_pred[:, count].max(), y_test[:, count].max())
        ax[i, j].scatter(y_pred[:, count], y_test[:, count], c="r", s=5)
        # Identity line: points on it are perfect predictions.
        ax[i, j].plot([0, maxvalue], [0, maxvalue], "--", c="black")
        ax[i, j].set_xlabel("推定した値")
        ax[i, j].set_ylabel("実際の値")
        ax[i, j].set_xlim(0, maxvalue)
        ax[i, j].set_ylim(0, maxvalue)
        ax[i, j].grid()
        ax[i, j].legend([legend[count] + f" 平均誤差{mean_pct[count]:.2f}%"])
fig.tight_layout()
plt.show()
# (stray pasted text, preserved as a comment so the file parses: "最近のコメント" / "Recent comments" — a page artifact, not Python)