#!/usr/bin/env python
# coding: utf-8

# # phygnn binary classification example
#
# Here's an example of how to build a binary classifier with a
# PhysicsGuidedNeuralNetwork model without a real physics loss function:
# a dummy `p_fun` is passed in and the physics loss weight is set to zero,
# so the model trains on the data loss only.

# In[1]:


import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf

from rex import init_logger
from phygnn import PhysicsGuidedNeuralNetwork, p_fun_dummy


# In[2]:


init_logger('phygnn', log_level='INFO', log_file=None)


# In[3]:


# Build the feature table: a 500 x 500 grid of (x1, x2) points spanning
# [-250, 250) in each dimension, flattened into 250,000 samples.
x1 = np.arange(500) - 250
x2 = np.arange(500) - 250
x1, x2 = np.meshgrid(x1, x2)
x1 = x1.flatten()
x2 = x2.flatten()
x3 = x1 * x2  # product term; computed here but not used as an input feature
features = pd.DataFrame({'x1': x1, 'x2': x2})
features.head()


# In[4]:


# Binary labels: 1.0 where x1 and x2 have the same sign (x1 * x2 > 0),
# 0.0 otherwise.
y = ((x1 * x2) > 0).astype(float)
labels = pd.DataFrame({'y': y})
labels.head()


# In[5]:


# Network architecture: two 16-unit hidden layers with relu activations
# and a single sigmoid output unit for the class probability.
hidden_layers = [{'units': 16},
                 {'activation': 'relu'},
                 {'units': 16},
                 {'activation': 'relu'},
                 ]
output_layer = [{'units': 1},
                {'activation': 'sigmoid'},
                ]


# In[6]:


# Build and train the model. loss_weights=(1.0, 0.0) puts all of the weight
# on the data loss and none on the physics loss, so p_fun_dummy is never
# actually enforced. The features are passed a second time as the (unused)
# physics-loss input.
PhysicsGuidedNeuralNetwork.seed(0)
model = PhysicsGuidedNeuralNetwork(p_fun=p_fun_dummy,
                                   hidden_layers=hidden_layers,
                                   output_layer=output_layer,
                                   loss_weights=(1.0, 0.0),
                                   metric='binary_crossentropy',
                                   learning_rate=0.05,
                                   n_features=2, n_labels=1)
model.fit(features, labels, features, n_batch=1, n_epoch=50)


# In[7]:


# Plot the training history and evaluate accuracy on the training grid.
model.history[['training_loss', 'validation_loss']].plot()
plt.ylabel('Loss')
plt.show()
plt.close()

y_pred = model.predict(features)
accuracy = 100 * (np.round(y_pred) == labels.values).sum() / len(labels)
print('Accuracy: {:.2f}%'.format(accuracy))
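

# As a quick visual check, the predictions can also be plotted over the
# (x1, x2) grid to see the learned decision boundary. The sketch below is
# illustrative rather than part of the required workflow: it only reuses
# `model.predict` and the grid construction from above, and assumes the
# flattened predictions reshape cleanly back to the original 500 x 500
# meshgrid layout.

grid_pred = np.asarray(model.predict(features)).reshape(500, 500)
plt.imshow(grid_pred, origin='lower', extent=(-250, 249, -250, 249),
           cmap='coolwarm', vmin=0, vmax=1)
plt.colorbar(label='Predicted probability of class 1')
plt.xlabel('x1')
plt.ylabel('x2')
plt.title('Learned decision boundary (label: x1 * x2 > 0)')
plt.show()
plt.close()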