In [1]:
# Record the qiskit version used to run this notebook (for reproducibility).
import qiskit

print(qiskit.__version__)
print(qiskit.__qiskit_version__)
0.14.1
{'qiskit-terra': '0.14.1', 'qiskit-aer': '0.5.2', 'qiskit-ignis': '0.3.0', 'qiskit-ibmq-provider': '0.7.2', 'qiskit-aqua': '0.7.1', 'qiskit': '0.19.3'}
In [1]:
import os
import subprocess
import time
import pickle
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib import pyplot as plt
from sklearn import preprocessing
from tqdm.notebook import tqdm  # jupyter用
# from tqdm import tqdm  # jupyter以外用

from qiskit import BasicAer
from qiskit.circuit.library import ZZFeatureMap
from qiskit.ml.datasets import ad_hoc_data, breast_cancer
from qiskit.aqua import aqua_globals, QuantumInstance
from qiskit.aqua.utils import split_dataset_to_data_and_labels, map_label_to_class_name
from qiskit.aqua.algorithms import SklearnSVM
from qiskit.aqua.algorithms import QSVM

Prepare the prediction mesh

In [21]:
feature_dim = 2  # number of features
training_dataset_size = 20
testing_dataset_size = 10

# Prediction-mesh settings:
# classify a 50x50 mesh and color the cells to visualize a pseudo decision boundary
size = 50
mesh_list = [[2*i/size-1, 2*j/size-1] for i in range(size+1) for j in range(size+1)]

# quantum-computer (simulator) parameters
shots = 1024
seed = 10598
In [22]:
def eval_model(pred_label, print_mode=True, test_label=None):
    """Score binary "A"/"B" predictions against ground-truth labels.

    Label "A" is treated as the positive class.

    Args:
        pred_label: predicted labels ("A"/"B"), ordered like the test set.
        print_mode: if True, print the prediction/truth comparison plus the
            rounded metrics and return None; if False, return the metrics.
        test_label: ground-truth labels; defaults to the notebook convention of
            testing_dataset_size "A"s followed by testing_dataset_size "B"s.

    Returns:
        dict with accuracy / precision / recall / specificity / F1-measure
        when print_mode is False, otherwise None.
    """
    if test_label is None:
        test_label = ["A"]*testing_dataset_size + ["B"]*testing_dataset_size

    correct = [x == y for x, y in zip(pred_label, test_label)]
    accuracy = sum(correct)/len(test_label) if test_label else 0.0

    # Guard every ratio against an empty denominator (e.g. no "A" predicted),
    # which previously raised ZeroDivisionError.
    predicted_pos = sum(x == "A" for x in pred_label)
    actual_pos = sum(y == "A" for y in test_label)
    actual_neg = sum(y == "B" for y in test_label)
    precision = (sum(x == y for x, y in zip(pred_label, test_label) if x == "A")/predicted_pos
                 if predicted_pos else 0.0)
    recall = (sum(x == y for x, y in zip(pred_label, test_label) if y == "A")/actual_pos
              if actual_pos else 0.0)
    specificity = (sum(x == y for x, y in zip(pred_label, test_label) if y == "B")/actual_neg
                   if actual_neg else 0.0)
    f1 = 2*recall*precision/(recall + precision) if (recall + precision) else 0.0

    eval_dict = {"accuracy": accuracy, "precision": precision, "recall": recall,
                 "specificity": specificity, "F1-measure": f1}
    if print_mode:
        print("result: ", pred_label)
        print("truth : ", test_label)
        eval_dict_print = {k: round(v, 2) for k, v in eval_dict.items()}
        print(eval_dict_print)
    else:
        return eval_dict
    

Define visualization helpers

In [23]:
def heatmap(pred_list, size=50):
    """Render mesh predictions as a two-color image on the square [-1, 1] x [-1, 1].

    pred_list holds one class label (0/1) per mesh point, generated with the
    second coordinate varying fastest — hence the column-major reshape.
    """
    grid = np.flipud(pred_list.reshape(size + 1, size + 1, order='F'))
    x_min, x_max, y_min, y_max = -1, 1, -1, 1
    # Pad the extent by half a cell on every side so cell centers sit on the mesh points.
    half_dx = (x_max - x_min) / size / 2
    half_dy = (y_max - y_min) / size / 2
    extent = [x_min - half_dx, x_max + half_dx, y_min - half_dy, y_max + half_dy]
    two_color_map = mpl.colors.ListedColormap(['orange', 'cyan'])
    # draw the heatmap
    plt.imshow(grid, interpolation='nearest', vmin=0, vmax=1, cmap=two_color_map, extent=extent)


def scatter_data(train_for_pred, test_for_pred, train_result, test_result,
                 yshift=-0.155, print_index=False):
    """Scatter train/test points over the decision-boundary heatmap.

    Red = label A, blue = label B; circles = training data, squares = test data.
    Misclassified points are overprinted with a white "x" (shifted by yshift).

    Args:
        train_for_pred, test_for_pred: arrays of 2-D points; first half assumed
            label A, second half label B.
        train_result, test_result: predicted numeric labels (0 for A, 1 for B).
        yshift: manual vertical offset for the "x" marker placement.
        print_index: if True, also print the index of each misclassified point.
    """
    dataset_dict = {"train": train_for_pred, "test": test_for_pred}
    result_dict = {"train":train_result, "test":test_result}
    marker_dict = {"train": "o", "test": "s"}
    
    for data_type in ["train", "test"]:
        data_num_half = int(len(dataset_dict[data_type])/2) # assumes a 1:1 split between labels A and B
        for label in ["A", "B"]:
            if label == "A":
                (plot_data, color) = dataset_dict[data_type][:data_num_half], "red"
            elif label == "B":
                (plot_data, color) = dataset_dict[data_type][data_num_half:], "blue"
            plt.plot(plot_data[:,0], plot_data[:,1], marker_dict[data_type], color=color, markersize=10)
        
        # mark misclassified points with an "x"
        for i, pred_label in enumerate(result_dict[data_type]):
            if (i < data_num_half and pred_label != 0)\
              or (i >= data_num_half and pred_label != 1): 
                if print_index:
                    plt.text(dataset_dict[data_type][i][0], dataset_dict[data_type][i][1], str(i), 
                            color="white", size=15, fontweight='bold')
                # x position is automatic; the y position is tuned manually via yshift,
                # which currently gives the best-looking placement
                plt.text(dataset_dict[data_type][i][0], dataset_dict[data_type][i][1] + yshift, "×", 
                         horizontalalignment='center', color="white", size=15, fontweight='bold')
        plt.axis('off')
        plt.title("Classification Boundary", size=15)

Prepare dataset: breast_cancer

In [24]:
# Load the breast_cancer dataset (n features, fixed train/test sizes).
sample_Total, training_input, test_input, class_labels = breast_cancer(
    training_size=training_dataset_size, test_size=testing_dataset_size,
    n=feature_dim, plot_data=True  # ,gap=0.3 # remove the gap parameter when using breast_cancer
)

Classical SVM

In [25]:
# Train a classical (RBF-kernel) SVM and classify the prediction mesh.
result = SklearnSVM(training_input, test_input, mesh_list).run()
print("kernel matrix during the training:")
kernel_matrix = result['kernel_matrix_training']
img = plt.imshow(np.asmatrix(kernel_matrix), interpolation='nearest', origin='upper', cmap='bone_r')
plt.show()

print("testing success ratio: ", result['testing_accuracy'])
kernel matrix during the training:
testing success ratio:  0.85
In [26]:
# Model evaluation

# Re-use the training/test data as prediction input to check for misclassifications
(train_for_pred, _), _ = split_dataset_to_data_and_labels(training_input)
(test_for_pred, _), _ = split_dataset_to_data_and_labels(test_input)
train_result = SklearnSVM(training_input, test_input, train_for_pred).run()
test_result = SklearnSVM(training_input, test_input, test_for_pred).run()

eval_model(test_result["predicted_classes"])
result:  ['B', 'B', 'A', 'A', 'A', 'A', 'A', 'A', 'B', 'A', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B']
truth :  ['A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B']
{'accuracy': 0.85, 'precision': 1.0, 'recall': 0.7, 'specificity': 1.0, 'F1-measure': 0.82}
In [27]:
plt.figure(figsize=(7, 7))
heatmap(result["predicted_labels"])
scatter_data(train_for_pred, test_for_pred, train_result["predicted_labels"], test_result["predicted_labels"],yshift=-0.014)
plt.show()

# Red = label A, blue = label B; circles = training data, squares = test data

Quantum SVM

In [28]:
%%time
# Train the quantum-kernel SVM (ZZ feature map) on the qasm_simulator backend.
backend = BasicAer.get_backend('qasm_simulator')
feature_map = ZZFeatureMap(feature_dim, reps=2)
svm = QSVM(feature_map, training_input, test_input, None)  # the data for prediction can be fed later.
svm.random_seed = seed
quantum_instance = QuantumInstance(backend, shots=shots, seed_simulator=seed, seed_transpiler=seed)
result = svm.run(quantum_instance)
CPU times: user 45.4 s, sys: 2.45 s, total: 47.9 s
Wall time: 4min 1s
In [29]:
# Visualize the quantum kernel matrix and report test accuracy.
print("kernel matrix during the training:")
kernel_matrix = result['kernel_matrix_training']
img = plt.imshow(np.asmatrix(kernel_matrix),interpolation='nearest',origin='upper',cmap='bone_r')
plt.show()

print("testing success ratio: ", result['testing_accuracy'])
kernel matrix during the training:
testing success ratio:  0.8
In [30]:
%%time
train_result = svm.predict(train_for_pred)
test_result = svm.predict(test_for_pred)

# Model evaluation (QSVM returns numeric labels: 0 -> "A", 1 -> "B")
eval_input = ["A" if x == 0 else "B" for x in test_result]
eval_model(eval_input)
result:  ['A', 'B', 'A', 'A', 'A', 'A', 'A', 'A', 'B', 'A', 'B', 'B', 'B', 'B', 'A', 'A', 'B', 'B', 'B', 'B']
truth :  ['A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B']
{'accuracy': 0.8, 'precision': 0.8, 'recall': 0.8, 'specificity': 0.8, 'F1-measure': 0.8}
CPU times: user 1min 1s, sys: 2.29 s, total: 1min 4s
Wall time: 4min 34s
In [31]:
# For a large prediction dataset, running several generated scripts concurrently
# in separate terminal tabs is (slightly) faster than a single process.
# Note: per-process throughput degrades with parallelism, so the speedup is modest.
# 3 * 51 * 17 == (size+1)**2 == 2601 mesh points, split across 3 input files.
(input_file_num, iter_num, input_size) = (3, 51, 17)
mesh_list_tmp = np.array(mesh_list).reshape(input_file_num, iter_num, input_size, feature_dim)


# Template for the stand-alone prediction script; "INDEX" is substituted per file.
# Uses `file_dir` (not `dir`, which shadows the builtin) for consistency with
# the ad_hoc_data script template below.
code_string = '''
import os
import pickle
from tqdm import tqdm

file_dir = os.path.dirname(os.path.abspath(__file__))
with open(file_dir+"/../model/model_qsvm_br_cn.pkl", "rb") as f:
    svm = pickle.load(f)

with open(file_dir+"/../input/input_INDEX.pkl", "rb") as f:
    input_array = pickle.load(f)

list_final = []
for epoch, epoch_array in enumerate(tqdm(input_array)):
    pred_tmp = svm.predict(epoch_array)
    print("epoch ", epoch, " has done")
    list_final.append(list(pred_tmp))
print(list_final)

with open(file_dir+"/../output/output_br_cn_INDEX.pkl", "wb") as f:
    pickle.dump(list_final, f)
'''

# Create the working directory layout used by the generated scripts.
for path in ['./qsvm_terminal/' + folder for folder in ["input", "output", "script", "model"]]:
    os.makedirs(path, exist_ok=True)

# Persist the trained model so the terminal scripts can load it.
with open("qsvm_terminal/model/model_qsvm_br_cn.pkl", "wb") as f:
    pickle.dump(svm, f)

for i in range(input_file_num):
    # the input files are shared between the breast_cancer and ad_hoc_data runs
    with open("qsvm_terminal/input/input_{}.pkl".format(i), "wb") as f:
        pickle.dump(mesh_list_tmp[i], f)

    with open("qsvm_terminal/script/predict_br_cn_{}.py".format(i), "w") as f:
        f.write(code_string.replace("INDEX", str(i)))
In [32]:
# Run predict_br_cn_0.py, predict_br_cn_1.py and predict_br_cn_2.py each in a terminal
# (the generated files are numbered from 0)
In [33]:
# Collect the per-file predictions written by the terminal scripts and
# flatten them back into one array matching mesh_list's ordering.
mesh_predict_tmp = []
for i in range(input_file_num):
    with open("qsvm_terminal/output/output_br_cn_"+str(i)+".pkl", "rb") as f:
        mesh_predict_tmp.append(pickle.load(f))
    
mesh_predict_result = np.array(mesh_predict_tmp).reshape(-1)
In [34]:
# If you prefer to run everything inside jupyter (much slower), use the code below
# (iter_num, input_size) = (51*3, 17)
# mesh_list_iter = np.array(mesh_list).reshape(iter_num, input_size, feature_dim)

# mesh_predict_tmp = []
# for i, iter_list in enumerate(tqdm(mesh_list_iter)):
#     tmp_list = svm.predict(iter_list)
#     mesh_predict_tmp.append(tmp_list)
#     print("epoch ", i, " has done ",list(tmp_list))

# print(mesh_predict_tmp)  # NOTE(review): original said `mesh_result`, which is never defined
# mesh_predict_result = np.array(mesh_predict_tmp).reshape(-1)
In [35]:
plt.figure(figsize=(7, 7))
heatmap(mesh_predict_result)
scatter_data(train_for_pred, test_for_pred, train_result, test_result ,yshift=-0.014)
plt.show()

# Red = label A, blue = label B; circles = training data, squares = test data

Prepare dataset: ad_hoc_data

In [36]:
# Load the synthetic ad_hoc_data dataset (seeded for reproducibility).
aqua_globals.random_seed = seed
sample_Total, training_input_unnormalized, test_input_unnormalized, class_labels = ad_hoc_data(
    training_size=training_dataset_size, 
    test_size=testing_dataset_size, 
    n=feature_dim, gap=0.3, plot_data=True
)
In [37]:
# Normalize the dataset to the same (-1, 1) scale as breast_cancer so it plots
# on the same mesh. Note: min/max are taken over all values (global scaling).
(train_for_pred, _), _ = split_dataset_to_data_and_labels(training_input_unnormalized)
(test_for_pred, _), _ = split_dataset_to_data_and_labels(test_input_unnormalized)
dataset_array = np.vstack([train_for_pred, test_for_pred])
min_array, max_array = dataset_array.min(), dataset_array.max()
training_input_normalized = {k:(v-min_array)/(max_array-min_array)*2-1 for k,v in training_input_unnormalized.items()}
test_input_normalized = {k:(v-min_array)/(max_array-min_array)*2-1 for k,v in test_input_unnormalized.items()} 

Classical SVM

In [38]:
# Train the classical SVM on the normalized ad_hoc data and classify the mesh.
result = SklearnSVM(training_input_normalized, test_input_normalized, mesh_list).run()
print("kernel matrix during the training:")
kernel_matrix = result['kernel_matrix_training']
img = plt.imshow(np.asmatrix(kernel_matrix), interpolation='nearest', origin='upper', cmap='bone_r')
plt.show()

print("testing success ratio: ", result['testing_accuracy'])
kernel matrix during the training:
testing success ratio:  0.7
In [39]:
# Model evaluation

# Re-use the training/test data as prediction input to check for misclassifications
(train_for_pred, _), _ = split_dataset_to_data_and_labels(training_input_normalized)
(test_for_pred, _), _ = split_dataset_to_data_and_labels(test_input_normalized)
train_result = SklearnSVM(training_input_normalized, test_input_normalized, train_for_pred).run()
test_result = SklearnSVM(training_input_normalized, test_input_normalized, test_for_pred).run()

eval_model(test_result["predicted_classes"])
result:  ['A', 'B', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'B', 'B', 'A', 'B', 'B', 'A', 'B', 'A', 'A']
truth :  ['A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B']
{'accuracy': 0.7, 'precision': 0.64, 'recall': 0.9, 'specificity': 0.5, 'F1-measure': 0.75}
In [40]:
plt.figure(figsize=(7, 7))
heatmap(result["predicted_labels"])
scatter_data(train_for_pred, test_for_pred, train_result["predicted_labels"], test_result["predicted_labels"],yshift=-0.014)
plt.show()

# Red = label A, blue = label B; circles = training data, squares = test data

Quantum SVM

In [41]:
%%time
# Train the quantum-kernel SVM on the normalized ad_hoc data.
backend = BasicAer.get_backend('qasm_simulator')
feature_map = ZZFeatureMap(feature_dim, reps=2)
svm = QSVM(feature_map, training_input_normalized, test_input_normalized, None)# the data for prediction can be fed later.
svm.random_seed = seed
quantum_instance = QuantumInstance(backend, shots=shots, seed_simulator=seed, seed_transpiler=seed)
result = svm.run(quantum_instance)
CPU times: user 40 s, sys: 1.59 s, total: 41.6 s
Wall time: 3min
In [42]:
# Visualize the quantum kernel matrix and report test accuracy.
print("kernel matrix during the training:")
kernel_matrix = result['kernel_matrix_training']
img = plt.imshow(np.asmatrix(kernel_matrix),interpolation='nearest',origin='upper',cmap='bone_r')
plt.show()

print("testing success ratio: ", result['testing_accuracy'])
kernel matrix during the training:
testing success ratio:  0.55
In [43]:
%%time
train_result = svm.predict(train_for_pred)
test_result = svm.predict(test_for_pred)

# Model evaluation (QSVM returns numeric labels: 0 -> "A", 1 -> "B")
eval_input = ["A" if x == 0 else "B" for x in test_result]
eval_model(eval_input)
result:  ['A', 'B', 'A', 'B', 'A', 'A', 'A', 'A', 'B', 'B', 'A', 'B', 'B', 'A', 'B', 'A', 'A', 'B', 'A', 'B']
truth :  ['A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B']
{'accuracy': 0.55, 'precision': 0.55, 'recall': 0.6, 'specificity': 0.5, 'F1-measure': 0.57}
CPU times: user 1min, sys: 2.35 s, total: 1min 3s
Wall time: 4min 53s
In [44]:
# For a large prediction dataset, running several generated scripts concurrently
# in separate terminal tabs is (slightly) faster than a single process.
# Note: per-process throughput degrades with parallelism, so the speedup is modest.
(input_file_num, iter_num, input_size) = (3, 51, 17) 
mesh_list_tmp = np.array(mesh_list).reshape(input_file_num, iter_num, input_size, feature_dim)


# Template for the stand-alone prediction script; "INDEX" is substituted per file.
code_string = '''
import os
import pickle
from tqdm import tqdm

file_dir = os.path.dirname(os.path.abspath(__file__))
with open(file_dir+"/../model/model_qsvm_ad_hc.pkl", "rb") as f:
    svm = pickle.load(f)

with open(file_dir+"/../input/input_INDEX.pkl", "rb") as f:
    input_array = pickle.load(f)
    
list_final = []
for epoch, epoch_array in enumerate(tqdm(input_array)):
    pred_tmp  = svm.predict(epoch_array)
    print("epoch ", epoch, " has done")
    list_final.append(list(pred_tmp))
print(list_final)

with open(file_dir+"/../output/output_ad_hc_INDEX.pkl", "wb") as f:
    pickle.dump(list_final, f)
'''  

with open("qsvm_terminal/model/model_qsvm_ad_hc.pkl", "wb") as f:
    pickle.dump(svm, f)
    
for i in range(input_file_num):
    # the input files are shared between the breast_cancer and ad_hoc_data runs
    with open("qsvm_terminal/input/input_{}.pkl".format(i),"wb") as f:
        pickle.dump(mesh_list_tmp[i], f)
        
    with open("qsvm_terminal/script/predict_ad_hc_{}.py".format(i),"w") as f:
        f.write(code_string.replace("INDEX",str(i)))
In [45]:
# Run predict_ad_hc_0.py, predict_ad_hc_1.py and predict_ad_hc_2.py each in a terminal
# (the generated files are numbered from 0)
In [46]:
# Collect the per-file predictions written by the terminal scripts and
# flatten them back into one array matching mesh_list's ordering.
mesh_predict_tmp = []
for i in range(input_file_num):
    with open("qsvm_terminal/output/output_ad_hc_"+str(i)+".pkl", "rb") as f:
        mesh_predict_tmp.append(pickle.load(f))
    
mesh_predict_result = np.array(mesh_predict_tmp).reshape(-1)
In [47]:
plt.figure(figsize=(7, 7))
heatmap(mesh_predict_result)
scatter_data(train_for_pred, test_for_pred, train_result, test_result ,yshift=-0.014)
plt.show()

# Red = label A, blue = label B; circles = training data, squares = test data

Depth vs Model Performance & Depth vs Time

In [9]:
# Run the depth sweep from a terminal: executing it inside jupyter can raise
# BrokenProcessPool errors.

# NOTE: if you change `shots` or `seed` in the notebook, update them here too.
code_string_depth = '''
import os
import time
import pickle
import numpy as np
from qiskit import BasicAer
from qiskit.aqua.algorithms import QSVM
from qiskit.aqua import QuantumInstance
from qiskit.aqua import aqua_globals
from qiskit.circuit.library import ZZFeatureMap
from qiskit.ml.datasets import ad_hoc_data
from qiskit.aqua.utils import split_dataset_to_data_and_labels

# sampling configuration
feature_dim = 2  # number of features
training_dataset_size = 20
testing_dataset_size = 10
shots = 1024
seed = 10598


# model evaluation
def eval_model(pred_label, print_mode=True):
    test_label = ["A"]*testing_dataset_size + ["B"]*testing_dataset_size

    accuracy = sum([x == y for x, y in zip(pred_label, test_label)])/len(test_label)
    # guard each ratio against an empty denominator (e.g. no "A" predicted)
    predicted_pos = sum([x == "A" for x in pred_label])
    actual_pos = sum([y == "A" for y in test_label])
    actual_neg = sum([y == "B" for y in test_label])
    precision = (sum([x == y for x, y in zip(pred_label, test_label) if x == "A"])/predicted_pos
                 if predicted_pos else 0.0)
    recall = (sum([x == y for x, y in zip(pred_label, test_label) if y == "A"])/actual_pos
              if actual_pos else 0.0)
    specificity = (sum([x == y for x, y in zip(pred_label, test_label) if y == "B"])/actual_neg
                   if actual_neg else 0.0)
    f1 = 2*recall*precision/(recall + precision) if (recall + precision) else 0.0
    eval_dict = {"accuracy": accuracy, "precision": precision, "recall": recall, "specificity": specificity, "F1-measure":f1}
    if print_mode:
        print("result: ", pred_label)
        print("truth : ", test_label)
        print(eval_dict)
    else:
        return eval_dict


# fetch samples (using ad_hoc_data)
aqua_globals.random_seed = seed
sample_Total, training_input_unnormalized, test_input_unnormalized, class_labels = ad_hoc_data(
    training_size=training_dataset_size, 
    test_size=testing_dataset_size, 
    n=feature_dim, gap=0.3, plot_data=False
)

# normalization
(train_for_pred, _), _ = split_dataset_to_data_and_labels(training_input_unnormalized)
(test_for_pred, _), _ = split_dataset_to_data_and_labels(test_input_unnormalized)
dataset_array = np.vstack([train_for_pred, test_for_pred])
min_array, max_array = dataset_array.min(), dataset_array.max()
training_input_normalized = {k: (v-min_array)/(max_array-min_array)*2-1 for k, v in training_input_unnormalized.items()}
test_input_normalized = {k: (v-min_array)/(max_array-min_array)*2-1 for k, v in test_input_unnormalized.items()}

# run the sweep over circuit depth
dict_result = {}
for depth in range(1, 11):
    start_time = time.time()
    backend = BasicAer.get_backend('qasm_simulator')
    feature_map_depth = ZZFeatureMap(feature_dim, reps=depth)
    svm_depth = QSVM(feature_map_depth, training_input_normalized, test_input_normalized, test_for_pred)
    svm_depth.random_seed = seed
    quantum_instance = QuantumInstance(backend, shots=shots, seed_simulator=seed, seed_transpiler=seed)
    result_depth = svm_depth.run(quantum_instance)
    eval_metrics_dict = eval_model(result_depth["predicted_classes"], print_mode=False)
    print("depth: ", depth)
    eval_metrics_dict_print = {k: round(v, 2) for k, v in eval_metrics_dict.items()}
    print(eval_metrics_dict_print)
    print("--- %s seconds ---" % (round(time.time() - start_time)))
    dict_result[depth] = {"evaluation": eval_metrics_dict,
                          "time": time.time() - start_time,
                          "result": result_depth}

# `file_dir` instead of `dir`, which shadows the builtin
file_dir = os.path.dirname(os.path.abspath(__file__))
with open(file_dir+"/../output/depth_variator_output.pkl", "wb") as f:
    pickle.dump(dict_result, f)

'''

with open("qsvm_terminal/script/depth_variator.py", "w") as f:
    f.write(code_string_depth)
In [11]:
cmd = ["python", "qsvm_terminal/script/depth_variator.py"]

# Merge stderr into stdout so the sweep's progress streams into the notebook.
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

# Stream output line by line until the pipe reaches EOF.
for line in iter(proc.stdout.readline, b''):
    print(line.rstrip().decode("utf8"))

# Close the pipe and reap the child process; surface a non-zero exit status
# instead of silently leaving a zombie process behind.
proc.stdout.close()
return_code = proc.wait()
if return_code:
    print("depth_variator.py exited with status", return_code)
depth:  1
{'accuracy': 0.6, 'precision': 0.62, 'recall': 0.5, 'specificity': 0.7, 'F1-measure': 0.56}
--- 244 seconds ---
depth:  2
{'accuracy': 0.5, 'precision': 0.5, 'recall': 0.5, 'specificity': 0.5, 'F1-measure': 0.5}
--- 238 seconds ---
depth:  3
{'accuracy': 0.5, 'precision': 0.5, 'recall': 0.2, 'specificity': 0.8, 'F1-measure': 0.29}
--- 242 seconds ---
depth:  4
{'accuracy': 0.7, 'precision': 0.67, 'recall': 0.8, 'specificity': 0.6, 'F1-measure': 0.73}
--- 140 seconds ---
depth:  5
{'accuracy': 0.55, 'precision': 0.56, 'recall': 0.5, 'specificity': 0.6, 'F1-measure': 0.53}
--- 250 seconds ---
depth:  6
{'accuracy': 0.45, 'precision': 0.43, 'recall': 0.3, 'specificity': 0.6, 'F1-measure': 0.35}
--- 268 seconds ---
depth:  7
{'accuracy': 0.6, 'precision': 0.62, 'recall': 0.5, 'specificity': 0.7, 'F1-measure': 0.56}
--- 270 seconds ---
depth:  8
{'accuracy': 0.45, 'precision': 0.45, 'recall': 0.5, 'specificity': 0.4, 'F1-measure': 0.48}
--- 279 seconds ---
depth:  9
{'accuracy': 0.5, 'precision': 0.5, 'recall': 0.7, 'specificity': 0.3, 'F1-measure': 0.58}
--- 284 seconds ---
depth:  10
{'accuracy': 0.55, 'precision': 0.54, 'recall': 0.7, 'specificity': 0.4, 'F1-measure': 0.61}
--- 290 seconds ---
In [19]:
# Load the sweep results written by depth_variator.py.
with open("qsvm_terminal/output/depth_variator_output.pkl", "rb") as f:
    result_dict = pickle.load(f)

# One row per depth; derive the index from the stored keys instead of
# hard-coding range(1, 11) so this cell keeps working if the sweep changes.
records = [{**value["evaluation"], "time": value["time"]} for value in result_dict.values()]
df = pd.DataFrame(records, index=list(result_dict.keys()))
df
Out[19]:
accuracy precision recall specificity F1-measure time
1 0.60 0.625000 0.5 0.7 0.555556 248.298147
2 0.50 0.500000 0.5 0.5 0.500000 238.146821
3 0.50 0.500000 0.2 0.8 0.285714 238.769541
4 0.70 0.666667 0.8 0.6 0.727273 146.780808
5 0.55 0.555556 0.5 0.6 0.526316 246.763014
6 0.45 0.428571 0.3 0.6 0.352941 260.120211
7 0.60 0.625000 0.5 0.7 0.555556 292.442170
8 0.45 0.454545 0.5 0.4 0.476190 273.795143
9 0.50 0.500000 0.7 0.3 0.583333 282.874764
10 0.55 0.538462 0.7 0.4 0.608696 286.375107
In [20]:
# Plot every metric (except runtime) against circuit depth.
plt.figure(figsize=(10,6))
for col in df.columns:
    if col == "time": continue
    plt.plot(df[col],linestyle='-', marker='o',label=col)

plt.axis('auto')
plt.ylim(0,1)    
plt.xticks(range(1,11))
plt.rcParams["legend.edgecolor"] = 'black'
plt.ylabel("Model Performance Metrics",fontsize=18)
plt.xlabel("Circuit Depth",fontsize=18)
plt.legend(bbox_to_anchor=(1, 0), loc='lower right', borderaxespad=0, fontsize=11)
plt.show()
In [21]:
# Plot runtime against circuit depth.
plt.figure(figsize=(10,6))
plt.plot(df["time"],linestyle='-', marker='o')
plt.xticks(range(1,11))
plt.ylabel("Calculation Time",fontsize=18)  # fixed typo: "Calclulation"
plt.xlabel("Circuit Depth",fontsize=18)
plt.ylim(0,400)
plt.show()
In [ ]:
 
In [ ]: