Information about the problem is at http://ti.arc.nasa.gov/tech/dash/pcoe/prognostic-data-repository/publications/#turbofan and original data is at http://ti.arc.nasa.gov/tech/dash/pcoe/prognostic-data-repository/#turbofan
The data was originally generated using the Commercial Modular Aero-Propulsion System Simulation (C-MAPSS) tool.
The same approach used to generate the turbofan engine degradation dataset was later used for the PHM08 data challenge. Information about other research on the C-MAPSS data is available at https://www.phmsociety.org/sites/phmsociety.org/files/phm_submission/2014/phmc_14_063.pdf
import sys
import h2o
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.utils.shared_utils import _locate
import numpy as np
import pandas as pd
import seaborn as sns
import pykalman as pyk
sns.set()
doGridSearch = True
doKalmanSmoothing = False  # unrelated to H2O; set to True to run the Kalman smoothing locally
# Input files don't have column names
dependent_vars = ['RemainingUsefulLife']
index_columns_names = ["UnitNumber","Cycle"]
operational_settings_columns_names = ["OpSet"+str(i) for i in range(1,4)]
sensor_measure_columns_names =["SensorMeasure"+str(i) for i in range(1,22)]
input_file_column_names = index_columns_names + operational_settings_columns_names + sensor_measure_columns_names
# And we are going to add these columns
kalman_smoothed_mean_columns_names =["SensorMeasureKalmanMean"+str(i) for i in range(1,22)]
train = pd.read_csv("http://h2o-public-test-data.s3.amazonaws.com/bigdata/laptop/CMAPSSData/train_FD001.txt", sep=r"\s*", header=None,
names=input_file_column_names, engine='python')
test = pd.read_csv("http://h2o-public-test-data.s3.amazonaws.com/bigdata/laptop/CMAPSSData/test_FD001.txt", sep=r"\s*", header=None,
names=input_file_column_names, engine='python')
test_rul = pd.read_csv("http://h2o-public-test-data.s3.amazonaws.com/bigdata/laptop/CMAPSSData/RUL_FD001.txt", header=None, names=['RemainingUsefulLife'])
test_rul.index += 1 # set the index to be the unit number in the test data set
test_rul.index.name = "UnitNumber"
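As a quick sanity check (an optional step, not part of the original flow), inspect the shapes and the first few rows; the row counts should match the 20,631 training rows and 13,096 test rows that H2O reports when parsing the preprocessed files later in the notebook.
# Optional sanity check: 26 columns each, named as defined above
print(train.shape, test.shape, test_rul.shape)
print(train.head())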
This puts all data on the same basis for supervised training
# Calculate the remaining useful life for each training sample based on last measurement being zero remaining
grouped_train = train.groupby('UnitNumber', as_index=False)
useful_life_train = grouped_train.agg({'Cycle' : np.max })
useful_life_train.rename(columns={'Cycle': 'UsefulLife'}, inplace=True)
train_wfeatures = pd.merge(train, useful_life_train, on="UnitNumber")
train_wfeatures["RemainingUsefulLife"] = -(train_wfeatures.UsefulLife - train_wfeatures.Cycle)
train_wfeatures.drop('UsefulLife', axis=1, inplace=True)
grouped_test = test.groupby('UnitNumber', as_index=False)
useful_life_test = grouped_test.agg({'Cycle' : np.max })
useful_life_test.rename(columns={'Cycle': 'UsefulLife'}, inplace=True)
test_wfeatures = pd.merge(test, useful_life_test, on="UnitNumber")
test_wfeatures["RemainingUsefulLife"] = -(test_wfeatures.UsefulLife - test_wfeatures.Cycle)
test_wfeatures.drop('UsefulLife', axis=1, inplace=True)
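A small consistency check (a sketch, not in the original flow): with this labeling, the last observed cycle of every unit should have RemainingUsefulLife equal to zero.
# The final cycle of each training unit should have zero remaining useful life
last_rows = train_wfeatures.loc[train_wfeatures.groupby('UnitNumber')['Cycle'].idxmax()]
assert (last_rows.RemainingUsefulLife == 0).all()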
Look at how the sensor measures evolve over time (first column) as well as how they relate to each other for a subset of the units.
These features were the top 3 and bottom 2 most important sensor features as discovered by H2O's GBM, later in the notebook.
sns.set_context("notebook", font_scale=1.5)
p = sns.pairplot(train_wfeatures.query('UnitNumber < 10'),
vars=["RemainingUsefulLife", "SensorMeasure4", "SensorMeasure3",
"SensorMeasure9", "SensorMeasure8", "SensorMeasure13"], size=10,
hue="UnitNumber", palette=sns.color_palette("husl", 9));
sns.plt.show()
The Kalman filter parameters are determined using the EM algorithm, and those parameters are then used to smooth the sensor signals.
This is applied to each unit separately, in both the training and the test set.
kalman_smoothed_mean_columns_names =["SensorMeasureKalmanMean"+str(i) for i in range(1,22)]
def calcSmooth(measures):
kf = pyk.KalmanFilter(initial_state_mean=measures[0], n_dim_obs=measures.shape[1])
(smoothed_state_means, smoothed_state_covariances) = kf.em(measures).smooth(measures)
return smoothed_state_means
def filterEachUnit(df):
dfout = df.copy()
for newcol in kalman_smoothed_mean_columns_names:
dfout[newcol] = np.nan
for unit in dfout.UnitNumber.unique():
sys.stdout.write('\rProcessing Unit: %d' % unit)
sys.stdout.flush()
unitmeasures = dfout[dfout.UnitNumber == unit][sensor_measure_columns_names]
smoothed_state_means = calcSmooth( np.asarray( unitmeasures ) )
dfout.loc[dfout.UnitNumber == unit, kalman_smoothed_mean_columns_names] = smoothed_state_means
sys.stdout.write('\rProcessing Unit: %d' % unit)
sys.stdout.flush()
sys.stdout.write('\rFinished\n')
sys.stdout.flush()
return dfout
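For intuition about what calcSmooth does, here is a minimal, self-contained pykalman sketch on a synthetic one-dimensional signal; the variable names and the n_iter value are illustrative choices, not part of the pipeline.
# Illustrative only: EM-fit a Kalman filter to a noisy random walk, then smooth it
rng = np.random.RandomState(0)
latent = np.cumsum(rng.normal(size=200))                       # hidden random walk
observed = (latent + rng.normal(scale=2.0, size=200)).reshape(-1, 1)
kf_demo = pyk.KalmanFilter(initial_state_mean=observed[0], n_dim_obs=1)
demo_means, _ = kf_demo.em(observed, n_iter=5).smooth(observed)
# demo_means[:, 0] is the smoothed estimate of the latent signal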
Write the preprocessed data out so the preprocessing only has to be done once.
# Get picky about the order of output columns
test_output_cols = index_columns_names + operational_settings_columns_names + sensor_measure_columns_names + \
kalman_smoothed_mean_columns_names
train_output_cols = test_output_cols + dependent_vars
if doKalmanSmoothing:
train_wkalman = filterEachUnit(train_wfeatures)
test_wkalman = filterEachUnit(test_wfeatures)
train_output = train_wkalman[train_output_cols]
test_output = test_wkalman[test_output_cols]
# Output the files, so we don't have to do the preprocessing again.
if doKalmanSmoothing:
train_output.to_csv("train_FD001_preprocessed.csv", index=False)
test_output.to_csv("test_FD001_preprocessed.csv", index=False)
test_rul.to_csv("rul_FD001_preprocessed.csv", index=True)
h2o.init()
H2O cluster uptime: | 6 hours 33 minutes 13 seconds 411 milliseconds |
H2O cluster version: | 3.5.0.99999 |
H2O cluster name: | Kevin |
H2O cluster total nodes: | 1 |
H2O cluster total memory: | 3.54 GB |
H2O cluster total cores: | 8 |
H2O cluster allowed cores: | 8 |
H2O cluster healthy: | True |
H2O Connection ip: | 127.0.0.1 |
H2O Connection port: | 54321 |
# Pull the Kalman-smoothed data if it was generated locally; otherwise source it from AWS
if doKalmanSmoothing:
train_hex = h2o.import_file(_locate("train_FD001_preprocessed.csv"))
test_hex = h2o.import_file(_locate("test_FD001_preprocessed.csv"))
else:
train_hex = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/bigdata/laptop/CMAPSSData/train_FD001_preprocessed.csv")
test_hex = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/bigdata/laptop/CMAPSSData/test_FD001_preprocessed.csv")
Parse Progress: [##################################################] 100%
Imported http://h2o-public-test-data.s3.amazonaws.com/bigdata/laptop/CMAPSSData/train_FD001_preprocessed.csv. Parsed 20,631 rows and 48 cols
Parse Progress: [##################################################] 100%
Imported http://h2o-public-test-data.s3.amazonaws.com/bigdata/laptop/CMAPSSData/test_FD001_preprocessed.csv. Parsed 13,096 rows and 47 cols
Use the operational settings and the Kalman-smoothed mean states as the independent features.
Set up a fold column so that each cross-validation model is trained on 90 units and validated on the remaining 10 units. This gives 10-fold cross-validation, and the cross-validation models are later combined into an ensemble for predictions.
xCols= operational_settings_columns_names + kalman_smoothed_mean_columns_names
yCol = dependent_vars[0]
foldCol = "UnitNumberMod10"
train_hex[foldCol] = train_hex["UnitNumber"] % 10
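Because the training units in this dataset are numbered 1 through 100, taking the unit number modulo 10 assigns roughly ten units to each of the ten fold ids; a quick pandas illustration (for intuition only, outside H2O):
# Illustration only: unit numbers modulo 10 give ten folds of ~10 units each
fold_demo = pd.Series(range(1, 101), name="UnitNumber") % 10
print(fold_demo.value_counts().sort_index())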
def trainGLM(x, y, fold_column, training_frame, alpha=0.5, penalty=1e-5):
model = H2OGeneralizedLinearEstimator(family = "gaussian",alpha = [alpha], Lambda = [penalty])
model.train(x=x, y=y, training_frame=training_frame, fold_column=fold_column)
return model
def gridSearchGLM(x, y, fold_column, training_frame, alphas = [0,0.5,1], penalties=np.logspace(-3,0,num=4)):
results = []
for alpha in alphas:
for penalty in penalties:
results.append( trainGLM(x, y, fold_column, training_frame, alpha, penalty) )
return results
if doGridSearch:
glmModels = gridSearchGLM(xCols, yCol, foldCol, train_hex)
else:
# this is used to speed up the demonstration by just building the single model previously found
glmModels = [ trainGLM(xCols, yCol, foldCol, train_hex, alpha=1, penalty=0.01 )]
glm Model Build Progress: [##################################################] 100% glm Model Build Progress: [##################################################] 100% glm Model Build Progress: [##################################################] 100% glm Model Build Progress: [##################################################] 100% glm Model Build Progress: [##################################################] 100% glm Model Build Progress: [##################################################] 100% glm Model Build Progress: [##################################################] 100% glm Model Build Progress: [##################################################] 100% glm Model Build Progress: [##################################################] 100% glm Model Build Progress: [##################################################] 100% glm Model Build Progress: [##################################################] 100% glm Model Build Progress: [##################################################] 100%
Use the model with the lowest MSE on the cross-validation data.
This is a reasonable substitute for the final scoring method.
def extractBestModel(models):
bestMse = models[0].mse(xval=True)
result = models[0]
for model in models:
if model.mse(xval=True) < bestMse:
bestMse = model.mse(xval=True)
result = model
return result
bestModel = extractBestModel(glmModels)
bestModel
Model Details
=============
H2OGeneralizedLinearEstimator : Generalized Linear Model
Model Key: GLM_model_python_1445965974785_270
GLM Model: summary
family | link | regularization | number_of_predictors_total | number_of_active_predictors | number_of_iterations | training_frame | |
gaussian | identity | Ridge ( lambda = 0.01 ) | 17 | 18 | 1 | Key_Frame__http___h2o_public_test_data_s3_amazonaws_com_bigdata_laptop_CMAPSSData_train_FD001_preprocessed.hex |
ModelMetricsRegressionGLM: glm
** Reported on train data. **
MSE: 1907.65887465
R^2: 0.597910247255
Mean Residual Deviance: 1907.65887465
Null degrees of freedom: 20630
Residual degrees of freedom: 20613
Null deviance: 97880908.3648
Residual deviance: 39356910.2429
AIC: 214425.224563

ModelMetricsRegressionGLM: glm
** Reported on cross-validation data. **
MSE: 1977.53644453
R^2: 0.583181694279
Mean Residual Deviance: 1977.53644453
Null degrees of freedom: 20630
Residual degrees of freedom: 20613
Null deviance: 98171005.0908
Residual deviance: 40798554.387
AIC: 215167.426437

Scoring History:
timestamp | duration | iteration | log_likelihood | objective | |
2015-10-27 16:46:38 | 0.000 sec | 0 | 48955702.8 | 2372.9 |
Now train GBM models over a grid of hyperparameters, and extract the 'best' model using the same approach as with GLM.
def trainGBM(x, y, fold_column, training_frame, learning_rate=0.1, ntrees=50, max_depth=5):
model = H2OGradientBoostingEstimator(distribution = "gaussian",
learn_rate=learning_rate, ntrees=ntrees, max_depth=max_depth)
model.train(x=x, y=y, training_frame=training_frame, fold_column=fold_column)
return model
def gridSearchGBM(x, y, fold_column, training_frame, learning_rates = [0.1,0.03,0.01], ntrees=[10,30,100,300], max_depth=[1,3,5]):
results = []
for learning_rate in learning_rates:
for ntree in ntrees:
for depth in max_depth:
print "GBM: {learning rate: "+str(learning_rate)+"},{ntrees: "+str(ntree)+"},{max_depth: "+str(depth)+"}"
results.append( trainGBM(x, y, fold_column, training_frame, learning_rate=learning_rate, ntrees=ntree, max_depth=depth) )
return results
if doGridSearch:
#bmModels = gridSearchGBM(xCols, yCol, foldCol, train_hex,\
# learning_rates=[0.03,0.01,0.003], ntrees=[100,300,500], max_depth=[1,3,5])
#run the below line for fast demo
gbmModels = gridSearchGBM(xCols, yCol, foldCol, train_hex, learning_rates=[0.03,0.01], ntrees=[50,200], max_depth=[2,5])
else:
gbmModels = [trainGBM(xCols, yCol, foldCol, train_hex, \
                      learning_rate=0.01, ntrees=300, max_depth=5)]
GBM: {learning rate: 0.03},{ntrees: 50},{max_depth: 2}
gbm Model Build Progress: [##################################################] 100%
GBM: {learning rate: 0.03},{ntrees: 50},{max_depth: 5}
gbm Model Build Progress: [##################################################] 100%
GBM: {learning rate: 0.03},{ntrees: 200},{max_depth: 2}
gbm Model Build Progress: [##################################################] 100%
GBM: {learning rate: 0.03},{ntrees: 200},{max_depth: 5}
gbm Model Build Progress: [##################################################] 100%
GBM: {learning rate: 0.01},{ntrees: 50},{max_depth: 2}
gbm Model Build Progress: [##################################################] 100%
GBM: {learning rate: 0.01},{ntrees: 50},{max_depth: 5}
gbm Model Build Progress: [##################################################] 100%
GBM: {learning rate: 0.01},{ntrees: 200},{max_depth: 2}
gbm Model Build Progress: [##################################################] 100%
GBM: {learning rate: 0.01},{ntrees: 200},{max_depth: 5}
gbm Model Build Progress: [##################################################] 100%
bestGbmModel = extractBestModel(gbmModels)
The full grid search found the best model at depth 5, learning rate 0.01, and 300 trees; the fast-demo grid above selects depth 5, learning rate 0.03, and 200 trees (see the parameters below).
bestGbmModel.params
{u'balance_classes': {'actual': False, 'default': False}, u'build_tree_one_node': {'actual': False, 'default': False}, u'checkpoint': {'actual': None, 'default': None}, u'class_sampling_factors': {'actual': None, 'default': None}, u'col_sample_rate': {'actual': 1.0, 'default': 1.0}, u'distribution': {'actual': u'gaussian', 'default': u'AUTO'}, u'fold_assignment': {'actual': u'AUTO', 'default': u'AUTO'}, u'fold_column': {'actual': {u'__meta': {u'schema_name': u'ColSpecifierV3', u'schema_type': u'VecSpecifier', u'schema_version': 3}, u'column_name': u'UnitNumberMod10', u'is_member_of_frames': None}, 'default': None}, u'ignore_const_cols': {'actual': True, 'default': True}, u'ignored_columns': {'actual': [u'SensorMeasure21', u'SensorMeasure20', u'SensorMeasure8', u'SensorMeasure9', u'SensorMeasure4', u'SensorMeasure5', u'SensorMeasure6', u'SensorMeasure7', u'SensorMeasure1', u'SensorMeasure2', u'SensorMeasure3', u'SensorMeasure16', u'SensorMeasure17', u'SensorMeasure14', u'SensorMeasure15', u'SensorMeasure12', u'SensorMeasure13', u'SensorMeasure10', u'SensorMeasure11', u'SensorMeasure18', u'SensorMeasure19', u'UnitNumber', u'Cycle'], 'default': None}, u'keep_cross_validation_predictions': {'actual': False, 'default': False}, u'learn_rate': {'actual': 0.03, 'default': 0.1}, u'max_after_balance_size': {'actual': 5.0, 'default': 5.0}, u'max_confusion_matrix_size': {'actual': 20, 'default': 20}, u'max_depth': {'actual': 5, 'default': 5}, u'min_rows': {'actual': 10.0, 'default': 10.0}, u'model_id': {'actual': None, 'default': None}, u'nbins': {'actual': 20, 'default': 20}, u'nbins_cats': {'actual': 1024, 'default': 1024}, u'nbins_top_level': {'actual': 1024, 'default': 1024}, u'nfolds': {'actual': 0, 'default': 0}, u'ntrees': {'actual': 200, 'default': 50}, u'offset_column': {'actual': None, 'default': None}, u'r2_stopping': {'actual': 0.999999, 'default': 0.999999}, u'response_column': {'actual': {u'__meta': {u'schema_name': u'ColSpecifierV3', u'schema_type': u'VecSpecifier', u'schema_version': 3}, u'column_name': u'RemainingUsefulLife', u'is_member_of_frames': None}, 'default': None}, u'sample_rate': {'actual': 1.0, 'default': 1.0}, u'score_each_iteration': {'actual': False, 'default': False}, u'seed': {'actual': 676607941053184637L, 'default': -4954682849530948794L}, u'training_frame': {'actual': {u'URL': u'/3/Frames/Key_Frame__http___h2o_public_test_data_s3_amazonaws_com_bigdata_laptop_CMAPSSData_train_FD001_preprocessed.hex', u'__meta': {u'schema_name': u'FrameKeyV3', u'schema_type': u'Key<Frame>', u'schema_version': 3}, u'name': u'Key_Frame__http___h2o_public_test_data_s3_amazonaws_com_bigdata_laptop_CMAPSSData_train_FD001_preprocessed.hex', u'type': u'Key<Frame>'}, 'default': None}, u'tweedie_power': {'actual': 1.5, 'default': 1.5}, u'validation_frame': {'actual': None, 'default': None}, u'weights_column': {'actual': None, 'default': None}}
The best GBM model reported an MSE of about 1695 on the cross-validation data, an improvement over the GLM's roughly 1978.
bestGbmModel
Model Details
=============
H2OGradientBoostingEstimator : Gradient Boosting Machine
Model Key: GBM_model_python_1445965974785_298
Model Summary:
number_of_trees | model_size_in_bytes | min_depth | max_depth | mean_depth | min_leaves | max_leaves | mean_leaves | |
200.0 | 79128.0 | 5.0 | 5.0 | 5.0 | 13.0 | 32.0 | 28.575 |
ModelMetricsRegression: gbm
** Reported on train data. **
MSE: 1095.13677598
R^2: 0.769170850551
Mean Residual Deviance: 1095.13677598

ModelMetricsRegression: gbm
** Reported on cross-validation data. **
MSE: 1694.88676263
R^2: 0.64275761858
Mean Residual Deviance: 1694.88676263

Scoring History:
timestamp | duration | number_of_trees | training_MSE | training_deviance | |
2015-10-27 16:49:41 | 1 min 15.434 sec | 1.0 | 4559.7 | 4559.7 | |
2015-10-27 16:49:41 | 1 min 15.465 sec | 2.0 | 4385.4 | 4385.4 | |
2015-10-27 16:49:41 | 1 min 15.491 sec | 3.0 | 4222.0 | 4222.0 | |
2015-10-27 16:49:41 | 1 min 15.517 sec | 4.0 | 4068.1 | 4068.1 | |
2015-10-27 16:49:41 | 1 min 15.543 sec | 5.0 | 3923.3 | 3923.3 | |
--- | --- | --- | --- | --- | --- |
2015-10-27 16:49:44 | 1 min 19.311 sec | 143.0 | 1166.6 | 1166.6 | |
2015-10-27 16:49:44 | 1 min 19.338 sec | 144.0 | 1165.3 | 1165.3 | |
2015-10-27 16:49:44 | 1 min 19.365 sec | 145.0 | 1163.8 | 1163.8 | |
2015-10-27 16:49:44 | 1 min 19.393 sec | 146.0 | 1163.0 | 1163.0 | |
2015-10-27 16:49:46 | 1 min 20.789 sec | 200.0 | 1095.1 | 1095.1 |
Variable Importances:
variable | relative_importance | scaled_importance | percentage |
SensorMeasureKalmanMean4 | 709743360.0 | 1.0 | 0.6 |
SensorMeasureKalmanMean3 | 172408064.0 | 0.2 | 0.1 |
SensorMeasureKalmanMean9 | 126265464.0 | 0.2 | 0.1 |
SensorMeasureKalmanMean14 | 50092948.0 | 0.1 | 0.0 |
SensorMeasureKalmanMean6 | 44630596.0 | 0.1 | 0.0 |
SensorMeasureKalmanMean11 | 30628940.0 | 0.0 | 0.0 |
SensorMeasureKalmanMean17 | 28122880.0 | 0.0 | 0.0 |
SensorMeasureKalmanMean21 | 25222878.0 | 0.0 | 0.0 |
SensorMeasureKalmanMean2 | 20427146.0 | 0.0 | 0.0 |
SensorMeasureKalmanMean7 | 17334488.0 | 0.0 | 0.0 |
SensorMeasureKalmanMean20 | 17059280.0 | 0.0 | 0.0 |
SensorMeasureKalmanMean12 | 13289842.0 | 0.0 | 0.0 |
SensorMeasureKalmanMean8 | 7374711.5 | 0.0 | 0.0 |
SensorMeasureKalmanMean15 | 5707966.0 | 0.0 | 0.0 |
SensorMeasureKalmanMean13 | 5684577.5 | 0.0 | 0.0 |
OpSet1 | 242252.4 | 0.0 | 0.0 |
OpSet2 | 170719.0 | 0.0 | 0.0 |
See how well the models predict on the training set. They should do well here, but it is worth a check.
Predictions are an ensemble (simple average) of the 10-fold cross-validation models.
train_hex["weights"] = 1
allModels = bestGbmModel.xvals
pred = sum([model.predict(train_hex) for model in allModels]) / len(allModels)
pred["actual"] = train_hex["RemainingUsefulLife"]
pred["unit"] = train_hex["UnitNumber"]
Ideally all points would lie on the diagonal, indicating that the predictions exactly match the actual values.
It is also important that the predictions get more accurate as a unit approaches zero remaining useful life.
Here we look at a sample of the first 12 units.
Move the predictions from H2O to pandas for plotting with Seaborn.
scored_df = pred.as_data_frame(use_pandas=True)
sns.set_context("notebook", font_scale=3)
g=sns.lmplot(x="actual",y="predict",hue="unit",col="unit",data=scored_df[scored_df.unit < 13],col_wrap=3,fit_reg=False, size=10)
ticks = np.linspace(-300,100, 5)
g = (g.set_axis_labels("Remaining Useful Life", "Predicted Useful Life")
.set(xlim=(-325, 125), ylim=(-325, 125),
xticks=ticks, yticks=ticks))
np.linspace(-300,100, 5)
array([-300., -200., -100., 0., 100.])
testPreds = sum([model.predict(test_hex) for model in allModels]) / len(allModels)
Append the original index information (Cycle and UnitNumber) to the predicted values so we have them later.
testPreds["Cycle"] = test_hex["Cycle"]
testPreds["UnitNumber"] = test_hex["UnitNumber"]
Move the predictions over to Python Pandas for final analysis and scoring
testPreds_df = testPreds.as_data_frame(use_pandas=True)
Load up the actual Remaining Useful Life information.
if doKalmanSmoothing:
actual_RUL = pd.read_csv(_locate("rul_FD001_preprocessed.csv"))
else:
actual_RUL = pd.read_csv("http://h2o-public-test-data.s3.amazonaws.com/bigdata/laptop/CMAPSSData/rul_FD001_preprocessed.csv")
The final scoring used in the competition is based on a single value per unit. We take each unit's three largest (typically the latest) predictions, average them, and convert the result from the T-minus format back to positive cycles remaining.
def aggfunc(x):
return np.mean( x.order().tail(3) )
grouped_by_unit_preds = testPreds_df.groupby("UnitNumber", as_index=False)
predictedRUL = grouped_by_unit_preds.agg({'predict' : aggfunc })
predictedRUL.predict = -predictedRUL.predict
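As a tiny worked example with hypothetical prediction values for one unit: in the T-minus convention the three largest predictions are typically those closest to the end of the observed cycles, so averaging them and flipping the sign yields a positive estimate of cycles remaining.
# Hypothetical per-cycle predictions for one test unit, in T-minus form
example_preds = pd.Series([-35.0, -28.0, -17.0, -11.0, -6.0])
print(-aggfunc(example_preds))   # mean of the three largest (-17, -11, -6), negated: ~11.33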
Add the prediction to the actual data frame, and use the scoring from the PHM08 competition (which gives a larger penalty for predicting more useful life than there actually is).
final = pd.concat([actual_RUL, predictedRUL.predict], axis=1)
def rowScore(row):
    d = row.predict - row.RemainingUsefulLife
    # PHM08 scoring: overestimating remaining life (d >= 0) is penalized more heavily
    return np.exp(-d/13.0) - 1 if d < 0 else np.exp(d/10.0) - 1
rowScores = final.apply(rowScore, axis=1)
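For intuition about the asymmetry, under the PHM08 constants (13 for early predictions, 10 for late ones) an error of the same size costs more when the model predicts more life than is actually left; a standalone illustration:
# PHM08 asymmetry: a 10-cycle overestimate of remaining life costs more than a 10-cycle underestimate
d = 10.0
print(np.exp(d / 13.0) - 1)   # predicted 10 cycles too few (early, conservative): ~1.16
print(np.exp(d / 10.0) - 1)   # predicted 10 cycles too many (late, risky): ~1.72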
This is the final score using the PHM08 method of scoring.
sum(rowScores)
1174.2997365847225
Some things that ideally would be true: the predicted and actual remaining useful life should fall along the diagonal in the plot below, and the spread should narrow as the actual remaining life approaches zero.
sns.set_context("notebook", font_scale=1.25)
sns.regplot("RemainingUsefulLife", "predict", data=final, fit_reg=False);