        else:
            pred = np.array(
                [model.predict(X[i : i + 1], verbose=0)[0][0] for i in range(len(X))]
            )
        predictions.append(weight * pred)
    return np.sum(predictions, axis=0)
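# The fragment above appears to be the tail of a weighted-ensemble predictor that loops
# over (model, weight) pairs and sums weight * pred. A minimal standalone sketch of the
# same idea is shown below; the name ensemble_predict_sketch and its signature are
# illustrative assumptions, not part of the original file.
def ensemble_predict_sketch(models, weights, X):
    import numpy as np  # assumed available as np at module level in the original script

    predictions = []
    for model, weight in zip(models, weights):
        # Flatten each model's output to one prediction per sample
        pred = np.asarray(model.predict(X)).reshape(len(X))
        predictions.append(weight * pred)
    # Weighted sum across models, one value per sample
    return np.sum(predictions, axis=0)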
# Calculate risk metrics (Sharpe ratio and max drawdown)
def calculate_risk_metrics(returns):
    sharpe_ratio = (
        np.mean(returns) / np.std(returns) * np.sqrt(252)
    )  # Assuming daily returns
    max_drawdown = np.max(np.maximum.accumulate(returns) - returns)
    return sharpe_ratio, max_drawdown
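# Illustrative usage of calculate_risk_metrics on synthetic daily returns. The data here
# is randomly generated purely for demonstration and is not from the original script.
def _demo_risk_metrics():
    import numpy as np

    rng = np.random.default_rng(0)
    daily_returns = rng.normal(loc=0.0005, scale=0.01, size=252)  # roughly one trading year
    sharpe, drawdown = calculate_risk_metrics(daily_returns)
    print(f"Sharpe ratio: {sharpe:.2f}, max drawdown: {drawdown:.4f}")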
# Predict future stock prices using a trained model
def predict_future(model, last_sequence, scaler, days):
    future_predictions = []
    current_sequence = last_sequence.copy()
    with tqdm(total=days, desc="Predicting future", leave=False) as pbar:
        for _ in range(days):
            if isinstance(model, (RandomForestRegressor, XGBRegressor)):
                prediction = model.predict(current_sequence.reshape(1, -1))
                future_predictions.append(
                    prediction[0]
                )  # predict returns a 1-element array; keep the scalar
            else:
                prediction = model.predict(
                    current_sequence.reshape(
                        1, current_sequence.shape[0], current_sequence.shape[1]
                    ),
                    verbose=0,
                )
                future_predictions.append(
                    prediction[0][0]
                )  # Take only the first (and only) element
            # Update the sequence for the next prediction
            current_sequence = np.roll(current_sequence, -1, axis=0)
            current_sequence[-1] = prediction  # Use the prediction to fill the newest row
            pbar.update(1)
    return np.array(future_predictions)
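# Illustrative call to predict_future with a Random Forest fitted on synthetic windowed
# data. It assumes the original script's module-level imports (numpy as np, tqdm,
# RandomForestRegressor, XGBRegressor) are in place. Shapes follow the reshape logic
# above: last_sequence is (window, n_features); the scaler argument is unused inside
# predict_future, so None is passed here.
def _demo_predict_future():
    import numpy as np
    from sklearn.ensemble import RandomForestRegressor

    window, n_features = 10, 1
    rng = np.random.default_rng(0)
    X = rng.random((50, window, n_features))
    y = rng.random(50)
    rf = RandomForestRegressor(n_estimators=10, random_state=42)
    rf.fit(X.reshape(len(X), -1), y)  # tree models expect 2-D input
    last_sequence = X[-1]  # shape (window, n_features)
    future = predict_future(rf, last_sequence, scaler=None, days=5)
    print(future.shape)  # (5,)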
# Split data into training and testing sets, respecting temporal order
def time_based_train_test_split(X, y, test_size=0.2):
    """
    Split the data into training and testing sets, respecting the temporal order.
    """
    split_idx = int(len(X) * (1 - test_size))
    X_train, X_test = X[:split_idx], X[split_idx:]
    y_train, y_test = y[:split_idx], y[split_idx:]
    return X_train, X_test, y_train, y_test
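# Illustrative split: with 100 samples and test_size=0.2, the first 80 rows become the
# training set and the last 20 the test set, preserving chronological order.
def _demo_time_split():
    import numpy as np

    X = np.arange(100).reshape(100, 1)
    y = np.arange(100)
    X_train, X_test, y_train, y_test = time_based_train_test_split(X, y, test_size=0.2)
    print(len(X_train), len(X_test))  # 80 20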
# Tune hyperparameters for Random Forest model
def tune_random_forest(X, y, quick_test=False):
    # Define parameter distribution based on quick_test flag
    if quick_test:
        print("Quick test mode: Performing simplified Random Forest tuning...")
        param_dist = {"n_estimators": randint(10, 50), "max_depth": randint(3, 10)}
        n_iter = 5
    else:
        print("Full analysis mode: Performing comprehensive Random Forest tuning...")
        param_dist = {
            "n_estimators": randint(100, 500),
            "max_depth": randint(5, 50),
            "min_samples_split": randint(2, 20),
            "min_samples_leaf": randint(1, 10),
            # "auto" was removed for regressors in scikit-learn 1.3; 1.0 (all features)
            # is the equivalent setting
            "max_features": [1.0, "sqrt", "log2"],
            "bootstrap": [True, False],
        }
        n_iter = 20
    # Initialize Random Forest model
    rf = RandomForestRegressor(random_state=42)
    # Perform randomized search for best parameters with time-series cross-validation
    tscv = TimeSeriesSplit(n_splits=3 if quick_test else 5)
    rf_random = RandomizedSearchCV(
        estimator=rf,
        param_distributions=param_dist,
        n_iter=n_iter,
        cv=tscv,
        scoring="neg_mean_squared_error",  # Optimize for mean squared error
        verbose=2,
        random_state=42,
        n_jobs=-1,
    )
    rf_random.fit(X.reshape(X.shape[0], -1), y)  # Flatten windowed samples to 2-D
    print(f"Best Random Forest parameters: {rf_random.best_params_}")
    return rf_random.best_estimator_
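# Illustrative call to tune_random_forest in quick_test mode on a small synthetic
# dataset (the 3-D windowed X is flattened inside the function before fitting). It
# assumes the original script's module-level imports (scipy.stats.randint,
# RandomizedSearchCV, TimeSeriesSplit, RandomForestRegressor) are in place.
def _demo_tune_random_forest():
    import numpy as np

    rng = np.random.default_rng(0)
    X = rng.random((60, 10, 1))  # 60 samples, window of 10, 1 feature
    y = rng.random(60)
    best_rf = tune_random_forest(X, y, quick_test=True)
    print(best_rf.get_params()["n_estimators"])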
# Tune hyperparameters for XGBoost model
def tune_xgboost(X, y, quick_test=False):
    # Define parameter distribution based on quick_test flag