text stringlengths 0 93.6k |
|---|
kernel_regularizer=l1_l2(l1=1e-5, l2=1e-4), |
), |
Dropout(0.2), # Add dropout layer |
LSTM(units=32, kernel_regularizer=l1_l2(l1=1e-5, l2=1e-4)), |
Dropout(0.2), # Add dropout layer |
Dense( |
units=16, activation="relu", kernel_regularizer=l1_l2(l1=1e-5, l2=1e-4) |
), |
Dense(units=1), |
] |
) |
model.compile(optimizer="adam", loss="mean_squared_error") |
return model |
# Create a GRU model for time series prediction
def create_gru_model(input_shape):
    """Build and compile a stacked two-layer GRU regressor.

    Architecture: GRU(64, sequences) -> Dropout -> GRU(32) -> Dropout
    -> Dense(16, relu) -> Dense(1), every trainable layer carrying an
    L1L2 kernel regularizer. Compiled with Adam and MSE loss.

    Args:
        input_shape: shape of one input sample, excluding the batch
            dimension (timesteps, features).

    Returns:
        A compiled ``Sequential`` model ready for ``fit``.
    """
    def _reg():
        # One fresh regularizer instance per layer, matching the
        # original per-layer l1_l2(...) calls.
        return l1_l2(l1=1e-5, l2=1e-4)

    model = Sequential()
    model.add(
        GRU(
            units=64,
            return_sequences=True,
            input_shape=input_shape,
            kernel_regularizer=_reg(),
        )
    )
    model.add(Dropout(0.2))  # regularize between recurrent layers
    model.add(GRU(units=32, kernel_regularizer=_reg()))
    model.add(Dropout(0.2))
    model.add(Dense(units=16, activation="relu", kernel_regularizer=_reg()))
    model.add(Dense(units=1))  # single-value regression head
    model.compile(optimizer="adam", loss="mean_squared_error")
    return model
# Train and evaluate a model using time series cross-validation
def train_and_evaluate_model(model, X, y, n_splits=5, model_name="Model"):
    """Train *model* across TimeSeriesSplit folds and score pooled predictions.

    For each fold, the model is fitted on the training slice and its
    test-slice predictions are accumulated; a single R^2 is computed over
    all pooled (true, predicted) pairs at the end.

    Args:
        model: a fitted-in-place estimator — RandomForestRegressor /
            XGBRegressor (2-D input) or a Keras ``Sequential`` (3-D input).
        X: feature array; 3-D for sequence models, reshaped to 2-D for
            tree models.
        y: target values aligned with X.
        n_splits: number of TimeSeriesSplit folds.
        model_name: label shown on the progress bar.

    Returns:
        Tuple ``(score, 0, score, predictions)`` — the R^2 appears twice
        and the second element is a constant 0 placeholder, presumably to
        match some caller's expected arity; verify against call sites.

    NOTE(review): the same model object is refitted on every fold. For the
    Keras branch this means later folds start from weights already trained
    on earlier (overlapping) data rather than from a fresh initialization —
    confirm this is intended before trusting the pooled R^2.
    """
    tscv = TimeSeriesSplit(n_splits=n_splits)
    all_predictions = []
    all_true_values = []
    with tqdm(total=n_splits, desc=f"Training {model_name}", leave=False) as pbar:
        for train_index, test_index in tscv.split(X):
            X_train, X_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]
            if isinstance(model, (RandomForestRegressor, XGBRegressor)):
                # Tree ensembles take 2-D input: flatten all trailing axes.
                X_train_2d = X_train.reshape(X_train.shape[0], -1)
                X_test_2d = X_test.reshape(X_test.shape[0], -1)
                model.fit(X_train_2d, y_train)
                predictions = model.predict(X_test_2d)
            elif isinstance(model, Sequential):
                # Stop when val_loss stalls for 10 epochs and roll back to
                # the best weights seen.
                early_stopping = EarlyStopping(
                    monitor="val_loss", patience=10, restore_best_weights=True
                )
                # Inner bar tracks epochs; total=100 matches epochs=100,
                # so the bar ends early if EarlyStopping fires first.
                with tqdm(total=100, desc="Epochs", leave=False) as epoch_pbar:
                    # Closure over epoch_pbar: bump the bar once per epoch.
                    class EpochProgressCallback(Callback):
                        def on_epoch_end(self, epoch, logs=None):
                            epoch_pbar.update(1)
                    model.fit(
                        X_train,
                        y_train,
                        epochs=100,
                        batch_size=32,
                        verbose=0,
                        validation_split=0.2,
                        callbacks=[early_stopping, EpochProgressCallback()],
                    )
                predictions = model.predict(X_test, verbose=0).flatten()
            all_predictions.extend(predictions)
            all_true_values.extend(y_test)
            pbar.update(1)
    # Single R^2 over all folds' pooled predictions (not a per-fold mean).
    score = r2_score(all_true_values, all_predictions)
    return score, 0, score, np.array(all_predictions)
# Make predictions using an ensemble of models
def ensemble_predict(models, X):
    """Return the element-wise mean of each model's flattened predictions.

    Args:
        models: iterable of fitted estimators — tree models
            (RandomForestRegressor / XGBRegressor) receive X collapsed to
            2-D; anything else (e.g. a Keras model) receives X unchanged.
        X: input features shared by every model.

    Returns:
        1-D array: the unweighted average prediction across models.
    """
    per_model_outputs = []
    for estimator in models:
        if isinstance(estimator, (RandomForestRegressor, XGBRegressor)):
            # Tree ensembles expect 2-D input: merge time/feature axes.
            output = estimator.predict(X.reshape(X.shape[0], -1))
        else:
            output = estimator.predict(X)
        # Flatten so (n, 1)-shaped network outputs align with 1-D ones.
        per_model_outputs.append(output.flatten())
    return np.mean(per_model_outputs, axis=0)
def weighted_ensemble_predict(models, X, weights): |
predictions = [] |
for model, weight in zip(models, weights): |
if isinstance(model, (RandomForestRegressor, XGBRegressor)): |
pred = model.predict(X.reshape(X.shape[0], -1)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.