query = """ SELECT * FROM `stockprediction-434721.stock_data.google_prices` WHERE CAST(Date AS DATE) >= DATE_SUB(CURRENT_DATE(), INTERVAL 5 YEAR) """ google_df = client.query(query).to_dataframe() googl_df.head(10) # Check for any missing or null values print(google_df.isnull().sum()) import pandas as pd # Ensure that Date is in datetime format google_df['Date'] = pd.to_datetime(google_df['Date']) # Drop columns that are not necessary for modeling # Adjust this based on your needs google_df = google_df.drop(columns=['Adj Close']) # Sort data by Date in ascending order google_df = google_df.sort_values(by='Date', ascending=True) # Preview updated dataframes print(google_df.head()) # Feature Engineering for google # 1. Moving Averages google_df['7_day_MA'] = google_df['Close'].rolling(window=7).mean() google_df['30_day_MA'] = google_df['Close'].rolling(window=30).mean() # 2. Volatility (Standard deviation of daily returns over 7 and 30 days) google_df['7_day_volatility'] = google_df['Close'].pct_change().rolling(window=7).std() google_df['30_day_volatility'] = google_df['Close'].pct_change().rolling(window=30).std() # 3. Lag Features (Previous day's price and volume) google_df['Previous_Close'] = google_df['Close'].shift(1) google_df['Previous_Volume'] = google_df['Volume'].shift(1) # 4. Daily Returns google_df['Daily_Return'] = google_df['Close'].pct_change() # Preview updated dataframe for google print(google_df.head()) # Check for missing values in each column for google print(google_df.isna().sum()) # Visualize where NaNs occur in google data import matplotlib.pyplot as plt import seaborn as sns plt.figure(figsize=(10, 6)) sns.heatmap(google_df.isna(), cbar=False, cmap="viridis") plt.title('google Data Missing Values') plt.show() # Drop rows with NaN values in the google dataframe google_df_cleaned = google_df.dropna() # Preview the cleaned google dataframe print(google_df_cleaned.head()) print(google_df_cleaned.shape) # Define the filename for the google dataframe google_csv_filename = "google_cleaned_feature_engineered.csv" # Export the cleaned google dataframe to CSV google_df_cleaned.to_csv(google_csv_filename, index=False) print(f"Dataframe exported to CSV: {google_csv_filename}") from google.colab import files # Download the google CSV file to your local machine files.download('google_cleaned_feature_engineered.csv') from sklearn.model_selection import train_test_split # Define features and target variable X_google = google_df_cleaned[['7_day_MA', '30_day_MA', '7_day_volatility', '30_day_volatility', 'Previous_Close', 'Previous_Volume', 'Daily_Return']] y_google = google_df_cleaned['Close'] # Split the data X_train_google, X_test_google, y_train_google, y_test_google = train_test_split(X_google, y_google, test_size=0.2, random_state=42) # Preview the shapes print(X_train_google.shape, X_test_google.shape, y_train_google.shape, y_test_google.shape) from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error, r2_score # Initialize the model model_google = LinearRegression() # Train the model on the training data model_google.fit(X_train_google, y_train_google) # Predict on the test data y_pred_google = model_google.predict(X_test_google) # Evaluate the model mse_google = mean_squared_error(y_test_google, y_pred_google) r2_google = r2_score(y_test_google, y_pred_google) print("google Linear Regression Performance:") print(f"Mean Squared Error: {mse_google}") print(f"R-squared: {r2_google}") import matplotlib.pyplot as plt import numpy as np # 
import matplotlib.pyplot as plt
import numpy as np

# Define the cyberpunk theme colors
cyberpunk_blue = '#00FFFF'
cyberpunk_red = '#FF007F'
cyberpunk_background = '#0D0D0D'

# Customize the plot style
plt.style.use('dark_background')

# Plot for Google stock
plt.figure(figsize=(10, 6))
plt.plot(np.arange(len(y_test_google)), y_test_google, color=cyberpunk_blue,
         label='Actual Price', linewidth=2)
plt.plot(np.arange(len(y_pred_google)), y_pred_google, color=cyberpunk_red,
         linestyle='--', label='Predicted Price', linewidth=2)
plt.title('Google Stock Price - Actual vs Predicted', fontsize=16, color=cyberpunk_blue)
plt.xlabel('Date', fontsize=12, color='white')
plt.ylabel('Price', fontsize=12, color='white')
plt.legend(loc='upper left', fontsize=10)
plt.grid(True, color='#333333')
plt.gca().set_facecolor(cyberpunk_background)
plt.show()

from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score

# Initialize the model
rf_google = RandomForestRegressor(n_estimators=100, random_state=42)

# Train the model on the training data
rf_google.fit(X_train_google, y_train_google)

# Predict on the test data
y_pred_rf_google = rf_google.predict(X_test_google)

# Evaluate the model
mse_rf_google = mean_squared_error(y_test_google, y_pred_rf_google)
r2_rf_google = r2_score(y_test_google, y_pred_rf_google)

print("Google Random Forest Performance:")
print(f"Mean Squared Error: {mse_rf_google}")
print(f"R-squared: {r2_rf_google}")

# Visualization for Random Forest
plt.figure(figsize=(10, 6))
plt.plot(y_test_google[:250].values, color="cyan", label="Actual Price")
plt.plot(y_pred_rf_google[:250], 'm--', label="Predicted Price")
plt.title("Google Stock Price - Actual vs Predicted (Random Forest)", color="cyan")
plt.xlabel("Date", color="cyan")
plt.ylabel("Price", color="cyan")
plt.legend(loc="best")
plt.grid(True, linestyle='--', alpha=0.7)
plt.gca().set_facecolor("black")
for spine in plt.gca().spines.values():
    spine.set_color("cyan")
plt.show()

# Get feature importance from the Random Forest model
importances_google = rf_google.feature_importances_

# Create a dataframe of the features and their importance
feature_names_google = X_train_google.columns
importance_df_google = pd.DataFrame({
    'Feature': feature_names_google,
    'Importance': importances_google
})

# Sort the dataframe by importance
importance_df_google = importance_df_google.sort_values(by='Importance', ascending=False)

# Plot the feature importance
plt.figure(figsize=(10, 6))
plt.barh(importance_df_google['Feature'], importance_df_google['Importance'], color='cyan')
plt.xlabel('Feature Importance', color='cyan')
plt.ylabel('Features', color='cyan')
plt.title('Google Stock Feature Importance (Random Forest)', color='cyan')
plt.gca().set_facecolor('black')
for spine in plt.gca().spines.values():
    spine.set_color('cyan')
plt.show()
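# Optional hedged addition: RMSE is in the same price units as Close, which can
# be easier to read than raw MSE when comparing models; MAE shown as well.
# A sketch only, reusing the Random Forest results from above:
import numpy as np
from sklearn.metrics import mean_absolute_error

rmse_rf_google = np.sqrt(mse_rf_google)
mae_rf_google = mean_absolute_error(y_test_google, y_pred_rf_google)
print(f"Random Forest RMSE: {rmse_rf_google:.4f}, MAE: {mae_rf_google:.4f}")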
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, r2_score

# Initialize the Gradient Boosting model
gb_google = GradientBoostingRegressor(n_estimators=100, random_state=42)

# Train the model on the training data
gb_google.fit(X_train_google, y_train_google)

# Predict on the test data
y_pred_gb_google = gb_google.predict(X_test_google)

# Evaluate the model
mse_gb_google = mean_squared_error(y_test_google, y_pred_gb_google)
r2_gb_google = r2_score(y_test_google, y_pred_gb_google)

print("Google Gradient Boosting Performance:")
print(f"Mean Squared Error: {mse_gb_google}")
print(f"R-squared: {r2_gb_google}")

# Visualization for Gradient Boosting
plt.figure(figsize=(10, 6))
plt.plot(y_test_google[:250].values, color="cyan", label="Actual Price")
plt.plot(y_pred_gb_google[:250], 'm--', label="Predicted Price")
plt.title("Google Stock Price - Actual vs Predicted (Gradient Boosting)", color="cyan")
plt.xlabel("Date", color="cyan")
plt.ylabel("Price", color="cyan")
plt.legend(loc="best")
plt.grid(True, linestyle='--', alpha=0.7)
plt.gca().set_facecolor("black")
for spine in plt.gca().spines.values():
    spine.set_color("cyan")
plt.show()

from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, r2_score

# Define the parameter grid for Gradient Boosting
param_grid = {
    'n_estimators': [100, 200, 300],     # Number of boosting stages to be run
    'learning_rate': [0.01, 0.1, 0.2],   # Step size shrinkage used in each update
    'max_depth': [3, 5, 7],              # Maximum depth of the individual regression estimators
    'min_samples_split': [2, 5, 10],     # Minimum number of samples required to split an internal node
    'min_samples_leaf': [1, 2, 4]        # Minimum number of samples required at a leaf node
}

# Initialize the Gradient Boosting Regressor
gb_google = GradientBoostingRegressor(random_state=42)

# Initialize GridSearchCV to find the best parameters
grid_search_google = GridSearchCV(estimator=gb_google, param_grid=param_grid,
                                  cv=5, n_jobs=-1, verbose=2)

# Fit the model to the Google training data
grid_search_google.fit(X_train_google, y_train_google)

# Get the best parameters from the grid search
best_params_google = grid_search_google.best_params_
print("Best parameters for Google:", best_params_google)

# Evaluate the model with the best parameters
# (distinct variable names so the Linear Regression metrics above are not overwritten)
best_gb_google = grid_search_google.best_estimator_
y_pred_gb_google = best_gb_google.predict(X_test_google)
mse_gb_tuned_google = mean_squared_error(y_test_google, y_pred_gb_google)
r2_gb_tuned_google = r2_score(y_test_google, y_pred_gb_google)

print("Google Gradient Boosting Performance (Tuned):")
print(f"Mean Squared Error: {mse_gb_tuned_google}")
print(f"R-squared: {r2_gb_tuned_google}")

import joblib
joblib.dump(best_gb_google, 'best_gb_google_model.pkl')

# Example of loading the saved model later:
# import joblib
# best_gb_google_loaded = joblib.load('best_gb_google_model.pkl')
# future_predictions = best_gb_google_loaded.predict(new_data)
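# Hedged variant: for time-ordered rows, scikit-learn's TimeSeriesSplit can
# replace the plain cv=5 inside GridSearchCV so each validation fold comes
# strictly after its training folds in time. This assumes the rows are in
# chronological order (e.g. from the chronological split sketched earlier);
# it is illustrative, not the tuning actually run above:
from sklearn.model_selection import TimeSeriesSplit

tscv = TimeSeriesSplit(n_splits=5)
# grid_search_ts = GridSearchCV(estimator=GradientBoostingRegressor(random_state=42),
#                               param_grid=param_grid, cv=tscv, n_jobs=-1, verbose=2)
# grid_search_ts.fit(X_train_ts, y_train_ts)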
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, r2_score

# Feature scaling
scaler = StandardScaler()
X_train_google_scaled = scaler.fit_transform(X_train_google)
X_test_google_scaled = scaler.transform(X_test_google)

# Define the neural network model
# (named nn_model_google so it does not overwrite the Linear Regression model_google)
nn_model_google = Sequential([
    Dense(64, input_dim=X_train_google.shape[1], activation='relu'),
    Dense(32, activation='relu'),
    Dense(1)  # Output layer
])

# Compile the model
nn_model_google.compile(optimizer='adam', loss='mean_squared_error')

# Train the model
history_google = nn_model_google.fit(X_train_google_scaled, y_train_google,
                                     validation_split=0.2, epochs=50, batch_size=32)

# Predict on the test set (flatten the (n, 1) Keras output to a 1-D array)
y_pred_nn_google = nn_model_google.predict(X_test_google_scaled).flatten()

# Evaluate the performance
mse_google_nn = mean_squared_error(y_test_google, y_pred_nn_google)
r2_google_nn = r2_score(y_test_google, y_pred_nn_google)

print("Google Neural Network Performance:")
print(f"Mean Squared Error: {mse_google_nn}")
print(f"R-squared: {r2_google_nn}")

# Google Neural Network predictions visualization
plt.figure(figsize=(10, 6))
plt.plot(y_test_google[:250].values, color="cyan", label="Actual Price")
plt.plot(y_pred_nn_google[:250], 'm--', label="Predicted Price")
plt.title("Google Stock Price - Actual vs Predicted (Neural Network)", color="cyan")
plt.xlabel("Date", color="cyan")
plt.ylabel("Price", color="cyan")
plt.legend(loc="best")
plt.grid(True, linestyle="--", alpha=0.7)
plt.gca().set_facecolor("black")
for spine in plt.gca().spines.values():
    spine.set_color("cyan")
plt.show()

import joblib

# Save Linear Regression model
joblib.dump(model_google, 'linear_reg_google_model.pkl')

# Save Random Forest model
joblib.dump(rf_google, 'random_forest_google_model.pkl')

# Save Neural Network model in the native Keras format
nn_model_google.save('best_nn_google_model_tuned.keras')

import joblib
from tensorflow.keras.models import load_model

# Load Linear Regression model
linear_reg_google_model = joblib.load('linear_reg_google_model.pkl')

# Load Random Forest model
rf_google_model = joblib.load('random_forest_google_model.pkl')

# Load Gradient Boosting model
best_gb_google_model = joblib.load('best_gb_google_model.pkl')

# Load Neural Network model
best_nn_google_model = load_model('best_nn_google_model_tuned.keras')

from google.colab import files

# Downloading the Google models
files.download('linear_reg_google_model.pkl')
files.download('random_forest_google_model.pkl')
files.download('best_gb_google_model.pkl')
files.download('best_nn_google_model_tuned.keras')
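# Optional hedged sanity check: confirm a reloaded model reproduces the test
# metric computed earlier before shipping the files. Sketch only, using the
# Random Forest loaded above; the value should match r2_rf_google:
from sklearn.metrics import r2_score

reloaded_r2 = r2_score(y_test_google, rf_google_model.predict(X_test_google))
print(f"Reloaded Random Forest R-squared: {reloaded_r2:.4f}")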
import matplotlib.pyplot as plt
import numpy as np

# Define colors
cyberpunk_blue = '#00FFFF'
cyberpunk_pink = '#FF1493'        # Pink for Gradient Boosting
cyberpunk_background = '#000D0D'
random_forest_color = '#FF00FF'   # Magenta for Random Forest
nn_color = '#FFFF00'              # Yellow for the Neural Network

# Create subplots: 2 rows, 2 columns
fig, axs = plt.subplots(2, 2, figsize=(15, 10))
fig.subplots_adjust(hspace=0.6, top=0.70)  # Space between charts; lower top margin leaves room for the title and table

# Title for the entire figure
fig.suptitle('Google Stock Price Prediction - Model Comparison', fontsize=18, color='white')

# Table with model performance metrics
table_data = [
    ["Model", "R-squared", "Mean Squared Error"],
    ["Linear Regression", 0.9997, 0.3042],
    ["Random Forest", 0.9991, 0.8181],
    ["Gradient Boosting", 0.9993, 0.6612],
    ["Neural Network", 0.9604, 35.3952]
]

# Position the table above the subplots
ax_table = fig.add_axes([0.1, 0.78, 0.8, 0.12])
ax_table.axis('off')
table = ax_table.table(cellText=table_data, colWidths=[0.3] * 3, loc='center', cellLoc='center')
table.auto_set_font_size(False)
table.set_fontsize(12)
table.scale(1, 1.5)

# Set table background to black and text to white
for key, cell in table.get_celld().items():
    cell.set_edgecolor('white')
    cell.set_text_props(color='white')
    cell.set_facecolor('black')

# Plot 1: Linear Regression
axs[0, 0].plot(np.arange(len(y_test_google[:250])), y_test_google[:250],
               color=cyberpunk_blue, label='Actual Price')
axs[0, 0].plot(np.arange(len(y_pred_google[:250])), y_pred_google[:250],
               'm--', label='Predicted Price (LR)', alpha=0.7)
axs[0, 0].set_title('Linear Regression', fontsize=12, color='white')
axs[0, 0].set_xlabel('Date', fontsize=10, color='white')
axs[0, 0].set_ylabel('Price', fontsize=10, color='white')
axs[0, 0].legend(loc='upper left')
axs[0, 0].grid(True, linestyle='--', alpha=0.7)
axs[0, 0].set_facecolor(cyberpunk_background)

# Plot 2: Random Forest (magenta)
axs[0, 1].plot(np.arange(len(y_test_google[:250])), y_test_google[:250],
               color=cyberpunk_blue, label='Actual Price')
axs[0, 1].plot(np.arange(len(y_pred_rf_google[:250])), y_pred_rf_google[:250],
               color=random_forest_color, label='Predicted Price (RF)', alpha=0.7)
axs[0, 1].set_title('Random Forest', fontsize=12, color='white')
axs[0, 1].set_xlabel('Date', fontsize=10, color='white')
axs[0, 1].set_ylabel('Price', fontsize=10, color='white')
axs[0, 1].legend(loc='upper left')
axs[0, 1].grid(True, linestyle='--', alpha=0.7)
axs[0, 1].set_facecolor(cyberpunk_background)

# Plot 3: Gradient Boosting (pink)
axs[1, 0].plot(np.arange(len(y_test_google[:250])), y_test_google[:250],
               color=cyberpunk_blue, label='Actual Price')
axs[1, 0].plot(np.arange(len(y_pred_gb_google[:250])), y_pred_gb_google[:250],
               color=cyberpunk_pink, label='Predicted Price (GB)', alpha=0.7)
axs[1, 0].set_title('Gradient Boosting', fontsize=12, color='white')
axs[1, 0].set_xlabel('Date', fontsize=10, color='white')
axs[1, 0].set_ylabel('Price', fontsize=10, color='white')
axs[1, 0].legend(loc='upper left')
axs[1, 0].grid(True, linestyle='--', alpha=0.7)
axs[1, 0].set_facecolor(cyberpunk_background)

# Plot 4: Neural Network (yellow; the feedforward Dense model defined above, not an LSTM)
axs[1, 1].plot(np.arange(len(y_test_google[:250])), y_test_google[:250],
               color=cyberpunk_blue, label='Actual Price')
axs[1, 1].plot(np.arange(len(y_pred_nn_google[:250])), y_pred_nn_google[:250],
               color=nn_color, label='Predicted Price (NN)', alpha=0.7)
axs[1, 1].set_title('Neural Network', fontsize=12, color='white')
axs[1, 1].set_xlabel('Date', fontsize=10, color='white')
axs[1, 1].set_ylabel('Price', fontsize=10, color='white')
axs[1, 1].legend(loc='upper left')
axs[1, 1].grid(True, linestyle='--', alpha=0.7)
axs[1, 1].set_facecolor(cyberpunk_background)

# Display the final dashboard
plt.show()
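# Hedged alternative to the hard-coded table above: build the metrics table
# from the variables computed earlier so the dashboard stays in sync across
# reruns. Sketch only; assumes the mse_*/r2_* names defined in the cells above.
import pandas as pd

metrics_df = pd.DataFrame({
    'Model': ['Linear Regression', 'Random Forest', 'Gradient Boosting (tuned)', 'Neural Network'],
    'R-squared': [r2_google, r2_rf_google, r2_gb_tuned_google, r2_google_nn],
    'Mean Squared Error': [mse_google, mse_rf_google, mse_gb_tuned_google, mse_google_nn],
})
print(metrics_df.round(4))
# To feed the figure's table directly:
# table_data = [metrics_df.columns.tolist()] + metrics_df.round(4).values.tolist()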