Investigation by Andreas Herten for X-Dev Blog in July 2022, see https://go.fzj.de/jsc-amdgpu-eval.
Imports and config
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.colors import LogNorm
from matplotlib.ticker import FuncFormatter, EngFormatter, LogFormatter, LogFormatterExponent, FormatStrFormatter, ScalarFormatter
sns.set()
%matplotlib inline
formatter = EngFormatter(sep="\N{THIN SPACE}", unit='B/s')
plt.rcParams.update({'text.usetex': False})
plt.rcParams.update({'font.sans-serif': ['DejaVu Sans', 'Bitstream Vera Sans', 'Computer Modern Sans Serif', 'Lucida Grande', 'Verdana', 'Geneva', 'Lucid', 'Arial', 'Helvetica', 'Avant Garde', 'sans-serif']})
Bandwidth for different message sizes
name_files = {
'scan': {
'amd': 'stream-scan-10657249-jrc0850.csv',
'nvidia': 'stream-scan-10657443-jrc0437.csv'
}
}
Read data and massage it
df_scan = {}
for vendor, filename in name_files['scan'].items():
_df = pd.read_csv(filename)
_df['Exponent NElement'] = _df.index
_df['Data Size / Byte'] = _df['Exponent NElement'] ## Initialize, to be able to use `.rpow()`
_df['Data Size / Byte'] = _df['Data Size / Byte'].rpow(2) * 2**3  # 2^exponent elements × 8 B per double-precision element
_df.set_index('Data Size / Byte', inplace=True)
df_scan[vendor] = _df
df_scan_max = {}
df_scan_max_triad = {}
for vendor, _df in df_scan.items():
df_scan_max[vendor] = _df.loc[:, _df.columns.str.contains("Max")]
df_scan_max_triad[vendor] = df_scan_max[vendor].loc[:, df_scan_max[vendor].columns.str.contains("Triad")]
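As a quick sanity check of the constructed data-size index (a sketch; the figure captions below state element counts from 2⁰ to 2²⁸):
print(df_scan_max_triad['amd'].index.min(), df_scan_max_triad['amd'].index.max())  # expected: 8 B up to 2^28 × 8 B = 2 GiB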
# Peak memory bandwidth of the full MI250 in B/s (3.2 GT/s effective HBM2e data rate × 8192-bit bus / 8 bit per byte)
3200 * 1000 * 1000 * 8192 / 8
3276800000000.0
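The dashed reference lines in the plots below use half of this value for a single GCD and 1.5552 TB/s for the A100; a minimal sketch collecting these reference values (variable names are illustrative only):
peak_mi250 = 3200 * 1000 * 1000 * 8192 / 8  # full MI250: 3.2768e12 B/s
peak_mi250_gcd = peak_mi250 / 2  # one GCD: 1.6384e12 B/s
peak_a100 = 1555.2 * 1000 * 1000 * 1000  # A100: 1.5552e12 B/s
print(f"MI250 GCD: {peak_mi250_gcd / 1e12:.4f} TB/s, A100: {peak_a100 / 1e12:.4f} TB/s")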
fig, axs = plt.subplots(ncols=2, figsize=(14,6))
plot_scan_limits = {'lin': {}, 'log': {}}
for ax, logplot in zip(axs, [False, True]):
df_scan_max['amd'].columns = df_scan_max['amd'].columns.str.replace(' / GiB/s', '', regex=False)  # drop the unit suffix from the legend labels
ax = (df_scan_max['amd'] * 1024**3).plot(ax=ax)  # CSV values are in GiB/s; convert to B/s for the EngFormatter
if not logplot:
ax.xaxis.set_major_formatter(EngFormatter(sep="\N{THIN SPACE}", unit="B"))
ax.set_xlabel("Data Size")
rect = patches.Rectangle((0.17,0.25), 0.64, 0.48, linewidth=1, facecolor='white', alpha=0.5, transform=ax.transAxes)
ax.add_patch(rect)
ax_ins = ax.inset_axes([0.35, 0.26, 0.45, 0.45])
ax_ins=(df_scan_max['amd']*1024*1024*1024).plot(ax=ax_ins, legend=False)
ax_ins.set_xlim(0.85*ax.get_xlim()[1], 0.96*ax.get_xlim()[1])
ax_ins.set_ylim(0.89*ax.get_ylim()[1], 0.96*ax.get_ylim()[1])
ax_ins.set_xticklabels([])
ax_ins.set_xlabel(None)
ax_ins.yaxis.set_major_formatter(EngFormatter(sep="\N{THIN SPACE}", unit="B/s", places=3))
ax.indicate_inset_zoom(ax_ins, edgecolor="black")
ax.set_title(f'Linear View')
plot_scan_limits['lin']['x'] = ax.get_xlim()
ax.hlines(3200 * 1000 * 1000 * 8192 / 8 / 2, 0.1*ax.get_xlim()[1], ax.get_xlim()[1], linestyles=['dashed'], colors='C4')
ax.annotate("Peak",
(0.08*ax.get_xlim()[1], 3200 * 1000 * 1000 * 8192 / 8 / 2),
xycoords='data', color='C4',
horizontalalignment='right', verticalalignment='center'
)
plot_scan_limits['lin']['y'] = ax.get_ylim()
ax.set_xlim(plot_scan_limits['lin']['x'])
if logplot:
ax.set_xscale('log', base=2)
ax.set_yscale('log', base=2)
plot_scan_limits['log']['x'] = ax.get_xlim()
plot_scan_limits['log']['y'] = ax.get_ylim()
ax.set_title(f'Double-Logarithmic View')
ax.yaxis.set_major_formatter(EngFormatter(sep="\N{THIN SPACE}", unit="B/s", places=1))
ax.set_ylabel('Memory Bandwidth')
fig.suptitle("GPU STREAM Variant Scan for MI250 GPU", fontsize='xx-large')
disclaimer = """
Plots of variant of STREAM memory benchmark using one AMD MI250 GCD of JURECA DC Evaluation Platform. Memory size (x axis) increasing in powers of two, from 2⁰ to 2²⁸.
Values in Byte/s (1 kB = 1000 B). Peak at 1.6384 TB/s. Software versions: ROCm 5.2.0, ROCm driver 5.16.9.22.20. 192 threads per block.
Own CUDA STREAM Variant hipified and compiled with hipcc (→ github.com/AndiH/CUDA-Cpp-STREAM).
Detailed description: https://go.fzj.de/jsc-amdgpu-eval.
"""
fig.text(0.5, -0.01, disclaimer, color="darkgray", horizontalalignment="center", verticalalignment="top")
fig.tight_layout()
for ext in ['png', 'pdf', 'svg']:
fig.savefig(f"stream_scan_amd_mi250.{ext}", dpi=300, bbox_inches='tight')
fig, axs = plt.subplots(ncols=2, figsize=(14,6))
for ax, logplot in zip(axs, [False, True]):
df_scan_max['nvidia'].columns = df_scan_max['nvidia'].columns.str.replace(' / GiB/s', '', regex=False)  # drop the unit suffix from the legend labels
ax = (df_scan_max['nvidia'] * 1024**3).plot(ax=ax)  # CSV values are in GiB/s; convert to B/s
if not logplot:
ax.xaxis.set_major_formatter(EngFormatter(sep="\N{THIN SPACE}", unit="B"))
ax.set_xlabel("Data Size")
rect = patches.Rectangle((0.17,0.25), 0.64, 0.48, linewidth=1, facecolor='white', alpha=0.5, transform=ax.transAxes)
ax.add_patch(rect)
ax_ins = ax.inset_axes([0.35, 0.26, 0.45, 0.45])
ax_ins=(df_scan_max['nvidia']*1024*1024*1024).plot(ax=ax_ins, legend=False)
ax_ins.set_xlim(0.85*ax.get_xlim()[1], 0.96*ax.get_xlim()[1])
ax_ins.set_ylim(0.836*ax.get_ylim()[1], 0.848*ax.get_ylim()[1])
ax_ins.set_xticklabels([])
ax_ins.set_xlabel(None)
ax_ins.yaxis.set_major_formatter(EngFormatter(sep="\N{THIN SPACE}", unit="B/s", places=3))
ax.indicate_inset_zoom(ax_ins, edgecolor="black")
ax.set_title(f'Linear View')
ax.set_xlim(plot_scan_limits['lin']['x'])
ax.set_ylim(plot_scan_limits['lin']['y'])
ax.hlines(1555.2 * 1000 * 1000 * 1000, 0.1*ax.get_xlim()[1], ax.get_xlim()[1], linestyles=['dashed'], colors='C4')
ax.annotate("Peak",
(0.08*ax.get_xlim()[1], 1555.2 * 1000 * 1000 * 1000),
xycoords='data', color='C4',
horizontalalignment='right', verticalalignment='center'
)
if logplot:
ax.set_xscale('log', base=2)
ax.set_yscale('log', base=2)
ax.set_title(f'Double-Logarithmic View')
ax.set_xlim(plot_scan_limits['log']['x'])
ax.set_ylim(plot_scan_limits['log']['y'])
ax.yaxis.set_major_formatter(EngFormatter(sep="\N{THIN SPACE}", unit="B/s", places=1))
ax.set_ylabel('Memory Bandwidth')
fig.suptitle("GPU STREAM Variant Scan for A100 GPU", fontsize='xx-large')
disclaimer = """
Plots of variant of STREAM memory benchmark using one NVIDIA A100 GPU of JURECA DC. Memory size (x axis) increasing in powers of two, from 2⁰ to 2²⁸.
Values in Byte/s (1 kB = 1000 B). Peak at 1.555 TB/s. Software versions: CUDA 11.5, CUDA driver 510.47.03. 192 threads per block.
Own CUDA STREAM Variant (→ github.com/AndiH/CUDA-Cpp-STREAM). Systematic outlier for 2²³.
Detailed description: https://go.fzj.de/jsc-amdgpu-eval.
"""
fig.text(0.5, -0.01, disclaimer, color="darkgray", horizontalalignment="center", verticalalignment="top")
fig.tight_layout()
for ext in ['png', 'pdf', 'svg']:
fig.savefig(f"stream_scan_nvidia_a100.{ext}", dpi=300, bbox_inches='tight')
Comparing max Triad bandwidth for MI250 vs. A100
fig, axs = plt.subplots(ncols=2, figsize=(14,6))
color_amd = (223/255,31/255,54/255)
color_nvidia = (118/255,185/255,0)
for ax, logplot in zip(axs, [False, True]):
ax=(df_scan_max_triad['amd'].rename(columns={'Triad (Max) / GiB/s': 'Triad MI250'})*1024*1024*1024).plot(ax=ax, marker=".", color=color_amd)
ax=(df_scan_max_triad['nvidia'].rename(columns={'Triad (Max) / GiB/s': 'Triad A100'})*1024*1024*1024).plot(ax=ax, linestyle="dashed", marker="*", color=color_nvidia)
if logplot:
ax.set_xscale('log', base=2)
ax.set_yscale('log', base=2)
if logplot == False:
ax.xaxis.set_major_formatter(EngFormatter(sep="\N{THIN SPACE}", unit="B"))
ax.set_xlabel("Data Size")
ax.annotate('MI250: {bw} TB/s'.format(bw=round((df_scan_max_triad['amd']*1024*1024*1024/1000/1000/1000/1000).iloc[-1].tolist()[0], 3)),
(df_scan_max_triad['amd']*1024*1024*1024).reset_index().iloc[-1].tolist(),
xycoords='data', xytext=(0.8, 0.7), textcoords='axes fraction',
bbox=dict(facecolor='white', alpha=0.5),
arrowprops=dict(arrowstyle="->", color='gray', connectionstyle='arc3,rad=0.1',shrinkA=5, shrinkB=10), color="gray", horizontalalignment='right', verticalalignment='top',
)
ax.annotate('A100: {bw} TB/s'.format(bw=round((df_scan_max_triad['nvidia']*1024*1024*1024/1000/1000/1000/1000).iloc[-1].tolist()[0], 3)),
(df_scan_max_triad['nvidia']*1024*1024*1024).reset_index().iloc[-1].tolist(),
xycoords='data', xytext=(0.8, 0.77), textcoords='axes fraction',
bbox=dict(facecolor='white', alpha=0.5),
arrowprops=dict(arrowstyle="->", color='gray', connectionstyle='arc3,rad=-0.1',shrinkA=5, shrinkB=10), color="gray", horizontalalignment='right', verticalalignment='top',
)
ax.yaxis.set_major_formatter(EngFormatter(sep="\N{THIN SPACE}", unit="B/s", places=1))
fig.suptitle("GPU STREAM Variant Triad Scan for MI250 GCD and A100 GPU", fontsize='xx-large')
disclaimer = """
Linear and double-logarithmic plots of variant of STREAM memory benchmark comparing one AMD MI250 GCD and one NVIDIA A100 GPU. Memory size (x axis) increasing in powers of two, from 2⁰ to 2²⁸.
Values in Byte/s (1 kB = 1000 B). Software versions: ROCm 5.2.0, ROCm driver 5.16.9.22.20; CUDA 11.5, CUDA driver 510.47.03. 192 threads per block.
Own CUDA STREAM Variant, hipified for AMD (→ github.com/AndiH/CUDA-Cpp-STREAM).
Detailed description: https://go.fzj.de/jsc-amdgpu-eval.
"""
fig.text(0.5, -0.01, disclaimer, color="darkgray", horizontalalignment="center", verticalalignment="top")
fig.tight_layout()
for ext in ['png', 'pdf', 'svg']:
fig.savefig(f"stream_scan_compare_mi250_a100.{ext}", dpi=300, bbox_inches='tight')
df_stream = {}
df_stream['amd'] = pd.read_csv("stream-10607580-jrc0850.csv")
df_stream['amd']["Data Size / MiB"] = df_stream['amd'] ["Exponent"].rpow(2) * 8 / 1024 / 1024
df_stream['amd'] = df_stream['amd'].set_index(["Data Size / MiB", "Threads per Block"])
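The next cell references `_df_pivot`, which is only defined inside the plotting loops further below; a minimal sketch of the assumed definition (mirroring the pivot used for the heat maps; which of the four result columns produced the table is an assumption, Triad (Max) is used here):
_df_pivot = df_stream['amd'].pivot_table(index="Threads per Block", values="Triad (Max) / GiB/s", columns="Data Size / MiB")  # column choice is an assumption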
_df_pivot.sort_index(ascending=False)
| Threads per Block \ Data Size / MiB | 512.0 | 2048.0 | 8192.0 |
|---|---|---|---|
| 1024 | 1194.0512 | 1190.2111 | 1135.4497 |
| 512 | 1241.6530 | 1241.9594 | 1187.6554 |
| 256 | 1262.5840 | 1260.4971 | 1210.5210 |
| 128 | 1258.5429 | 1251.0974 | 1191.8881 |
cols = ['Copy (Max) / GiB/s', 'Scale (Max) / GiB/s', 'Add (Max) / GiB/s', 'Triad (Max) / GiB/s']
fig, axs = plt.subplots(ncols=4, figsize=(14,4), sharey=True)
for i, (ax, col) in enumerate(zip(axs, cols)):
_df_pivot = df_stream['amd'].pivot_table(index="Threads per Block", values=col, columns="Data Size / MiB").sort_index(ascending=False)
sns.heatmap(
_df_pivot,
cmap="YlGnBu",
linewidths=3,
square=True,
fmt=".0f",
cbar_kws={'label': 'Bandwidth / GiB/s'},
annot=_df_pivot.astype(int),
vmin=df_stream['amd'][cols].min().min(),
vmax=df_stream['amd'][cols].max().max(),
ax=ax
)
ax.set_title(f'{col.split(" ")[0]}')
#ax.set_xlabel(2**int(ax.get_xlabel()))
if i > 0:
ax.set_ylabel(None)
if i < 3:
fig.get_axes()[-1].set_ylabel(None)
ax.set_xticklabels(label.get_text()[:-2] for label in ax.get_xticklabels())  # strip the trailing ".0" from the data-size tick labels
fig.suptitle("GPU STREAM Variant for MI250 GPU", fontsize='xx-large')
disclaimer = """
Maps show various results of STREAM Benchmark Variants on single AMD MI250 GCD of JURECA DC Evaluation Platform.
Values in cells in GiB/s. Software versions: ROCm 5.2.0, ROCm driver 5.16.9.22.20.
Own CUDA STREAM Variant, hipified for AMD (→ github.com/AndiH/CUDA-Cpp-STREAM). Value shown is best result from 20 runs, excluding first.
Detailed description: https://go.fzj.de/jsc-amdgpu-eval.
"""
fig.text(0.5, -0.01, disclaimer, color="darkgray", horizontalalignment="center", verticalalignment="top", fontsize="large")
fig.tight_layout()
for ext in ['png', 'pdf', 'svg']:
fig.savefig(f"stream_datavsthreads_mi250-abs.{ext}", dpi=300, bbox_inches='tight')
cols = ['Copy (Max) / GiB/s', 'Scale (Max) / GiB/s', 'Add (Max) / GiB/s', 'Triad (Max) / GiB/s']
fig, axs = plt.subplots(ncols=4, figsize=(14,4), sharey=True)
for i, (ax, col) in enumerate(zip(axs, cols)):
_df_pivot = df_stream['amd'].pivot_table(index="Threads per Block", values=col, columns="Data Size / MiB").sort_index(ascending=False)
sns.heatmap(
_df_pivot,
cmap="YlGnBu",
linewidths=3,
square=True,
fmt=",.0%",
cbar_kws={'label': 'Bandwidth / GiB/s'},
annot=(_df_pivot / (3200 * 1000 * 1000 * 8192 / 8 / 2 / 1024 / 1024 / 1024)),  # relative to the per-GCD peak of 1.6384 TB/s ≈ 1526 GiB/s
vmin=df_stream['amd'][cols].min().min(),
vmax=df_stream['amd'][cols].max().max(),
ax=ax
)
ax.set_title(f'{col.split(" ")[0]}')
#ax.set_xlabel(2**int(ax.get_xlabel()))
if i > 0:
ax.set_ylabel(None)
if i < 3:
fig.get_axes()[-1].set_ylabel(None)
ax.set_xticklabels(label.get_text()[:-2] for label in ax.get_xticklabels())
#ax.xaxis.set_major_formatter(FormatStrFormatter('%.2f'))
fig.suptitle("GPU STREAM Variant for MI250 GPU", fontsize='xx-large')
disclaimer = """
Maps show various results of STREAM Benchmark Variants on single AMD MI250 GCD of JURECA DC Evaluation Platform.
Values in cells relative to per-GCD peak (1.6384 TB/s ≈ 1526 GiB/s). Software versions: ROCm 5.2.0, ROCm driver 5.16.9.22.20.
Own CUDA STREAM Variant, hipified for AMD (→ github.com/AndiH/CUDA-Cpp-STREAM). Value shown is best result from 20 runs, excluding first.
Detailed description: https://go.fzj.de/jsc-amdgpu-eval.
"""
fig.text(0.5, -0.01, disclaimer, color="darkgray", horizontalalignment="center", verticalalignment="top", fontsize="large")
fig.tight_layout()
for ext in ['png', 'pdf', 'svg']:
fig.savefig(f"stream_datavsthreads_mi250-rel.{ext}", dpi=300, bbox_inches='tight')
df_stream['nvidia'] = pd.read_csv("stream-10608023-jrc0437--nvidia.csv")
df_stream['nvidia']["Data Size / MiB"] = df_stream['nvidia']["Exponent"].rpow(2) * 8/ 1024 / 1024
df_stream['nvidia'] = df_stream['nvidia'].set_index(["Data Size / MiB", "Threads per Block"])
cols = ['Copy (Max) / GiB/s', 'Scale (Max) / GiB/s', 'Add (Max) / GiB/s', 'Triad (Max) / GiB/s']
fig, axs = plt.subplots(ncols=4, figsize=(14,4), sharey=True)
for i, (ax, col) in enumerate(zip(axs, cols)):
_df_pivot_nv = df_stream['nvidia'].pivot_table(index="Threads per Block", values=col, columns="Data Size / MiB").sort_index(ascending=False)
sns.heatmap(
_df_pivot_nv,
cmap="YlGnBu",
linewidths=3,
#norm=LogNorm(df_stream.min().min(), df_stream.max().max()),
square=True,
fmt=".0f",
cbar_kws={'label': 'Bandwidth / GiB/s'},
annot=_df_pivot_nv.astype(int),
#mask=mask,
vmin=df_stream['nvidia'][cols].min().min(),
vmax=df_stream['nvidia'][cols].max().max(),
ax=ax
)
ax.set_title(f'{col.split(" ")[0]}')
if i > 0:
ax.set_ylabel(None)
if i < 3:
fig.get_axes()[-1].set_ylabel(None)
ax.set_xticklabels(label.get_text()[:-2] for label in ax.get_xticklabels())
fig.suptitle("GPU STREAM Variant for A100 GPU", fontsize='xx-large')
disclaimer = """
Maps show various results of STREAM benchmark variants on single A100 GPU of JURECA DC.
Values in cells in GiB/s. Software versions: CUDA 11.5, CUDA driver 510.47.03.
Own CUDA STREAM Variant (→ github.com/AndiH/CUDA-Cpp-STREAM). Value shown is best result from 20 runs, excluding first.
Detailed description: https://go.fzj.de/jsc-amdgpu-eval.
"""
fig.text(0.5, -0.01, disclaimer, color="darkgray", horizontalalignment="center", verticalalignment="top", fontsize="large")
fig.tight_layout()
for ext in ['png', 'pdf', 'svg']:
fig.savefig(f"stream_datavsthreads_a100-abs.{ext}", dpi=300, bbox_inches='tight')
cols = ['Copy (Max) / GiB/s', 'Scale (Max) / GiB/s', 'Add (Max) / GiB/s', 'Triad (Max) / GiB/s']
fig, axs = plt.subplots(ncols=4, figsize=(14,4), sharey=True)
for i, (ax, col) in enumerate(zip(axs, cols)):
_df_pivot_nv = df_stream['nvidia'].pivot_table(index="Threads per Block", values=col, columns="Data Size / MiB").sort_index(ascending=False)
sns.heatmap(
_df_pivot_nv,
cmap="YlGnBu",
linewidths=3,
#norm=LogNorm(df_stream.min().min(), df_stream.max().max()),
square=True,
fmt=",.0%",
cbar_kws={'label': 'Bandwidth / GiB/s'},
annot=_df_pivot_nv / (1555.2 * 1000 * 1000 * 1000 / 1024 / 1024 / 1024),  # relative to the A100 peak of 1.5552 TB/s ≈ 1448 GiB/s
#mask=mask,
vmin=df_stream['nvidia'][cols].min().min(),
vmax=df_stream['nvidia'][cols].max().max(),
ax=ax
)
ax.set_title(f'{col.split(" ")[0]}')
if i > 0:
ax.set_ylabel(None)
if i < 3:
fig.get_axes()[-1].set_ylabel(None)
ax.set_xticklabels(label.get_text()[:-2] for label in ax.get_xticklabels())
fig.suptitle("GPU STREAM Variant for A100 GPU", fontsize='xx-large')
disclaimer = """
Maps show various results of STREAM benchmark variants on single A100 GPU of JURECA DC.
Values in cells relative to peak (1448 GiB/s). Software versions: CUDA 11.5, CUDA driver 510.47.03.
Own CUDA STREAM Variant (→ github.com/AndiH/CUDA-Cpp-STREAM). Value shown is best result from 20 runs, excluding first.
Detailed description: https://go.fzj.de/jsc-amdgpu-eval.
"""
fig.text(0.5, -0.01, disclaimer, color="darkgray", horizontalalignment="center", verticalalignment="top", fontsize="large")
fig.tight_layout()
for ext in ['png', 'pdf', 'svg']:
fig.savefig(f"stream_datavsthreads_a100-rel.{ext}", dpi=300, bbox_inches='tight')
In the following, the raw data of this analysis is shown. The data is read from the CSV files and printed in Base64 encoding. To write it back to a file (or directly into a StringIO object), invert the procedure: copy the string and b64decode it (see the sketch after the printed output).
from pathlib import Path
import base64
for measurement, vendors in name_files.items():
for vendor, file in vendors.items():
_file_content = Path(file).read_text()
b64_content = base64.b64encode(_file_content.encode('utf-8'))
print(f'df[{measurement}][{vendor}]: {b64_content}')
df[scan][amd]: b'Q29weSAoTWF4KSAvIEdpQi9zLENvcHkgKE1pbikgLyBHaUIvcyxDb3B5IChBdmcpIC8gR2lCL3MsU2NhbGUgKE1heCkgLyBHaUIvcyxTY2FsZSAoTWluKSAvIEdpQi9zLFNjYWxlIChBdmcpIC8gR2lCL3MsQWRkIChNYXgpIC8gR2lCL3MsQWRkIChNaW4pIC8gR2lCL3MsQWRkIChBdmcpIC8gR2lCL3MsVHJpYWQgKE1heCkgLyBHaUIvcyxUcmlhZCAoTWluKSAvIEdpQi9zLFRyaWFkIChBdmcpIC8gR2lCL3MKMC4wMDE0LDAuMDAxMiwwLjAwMTMsMC4wMDE1LDAuMDAxMiwwLjAwMTMsMC4wMDIwLDAuMDAxNiwwLjAwMTksMC4wMDIwLDAuMDAxOCwwLjAwMTkKMC4wMDMwLDAuMDAyMywwLjAwMjYsMC4wMDI3LDAuMDAyNSwwLjAwMjYsMC4wMDQxLDAuMDAzNywwLjAwMzksMC4wMDQxLDAuMDAzNywwLjAwMzgKMC4wMDU0LDAuMDA0NSwwLjAwNTIsMC4wMDU0LDAuMDA0OSwwLjAwNTIsMC4wMDgyLDAuMDA3NCwwLjAwNzcsMC4wMDgyLDAuMDA2OSwwLjAwNzcKMC4wMTE5LDAuMDA5MSwwLjAxMDcsMC4wMTA5LDAuMDA5OCwwLjAxMDQsMC4wMTYzLDAuMDEzNiwwLjAxNTMsMC4wMTYzLDAuMDE0NywwLjAxNTEKMC4wMjM4LDAuMDE4MiwwLjAyMDcsMC4wMjM4LDAuMDIwMCwwLjAyMTIsMC4wMzI2LDAuMDI3MywwLjAzMDYsMC4wMzI2LDAuMDI3MywwLjAzMDQKMC4wNDM1LDAuMDM5MiwwLjA0MjAsMC4wNDM1LDAuMDI4MiwwLjA0MDMsMC4wNjUyLDAuMDU0NSwwLjA2MDEsMC4wNjUyLDAuMDU1NiwwLjA2MTgKMC4wOTUyLDAuMDc4NCwwLjA4MzYsMC4wODcwLDAuMDcyNywwLjA4MTIsMC4xMzA0LDAuMTA5MSwwLjEyMTcsMC4xMzA0LDAuMDgzMywwLjExODkKMC4xNzM5LDAuMTQ4MSwwLjE2NDcsMC4xNzM5LDAuMTQ1NSwwLjE2MjIsMC4yNjA5LDAuMjM1MywwLjI0NTIsMC4yNDAwLDAuMjE4MiwwLjIzNjUKMC4zNDc4LDAuMjkwOSwwLjMyMjcsMC4zNDc4LDAuMjkwOSwwLjMyMzcsMC40ODAwLDAuNDA2OCwwLjQ0MTQsMC40ODAwLDAuNDM2NCwwLjQ2MDEKMC42NDAwLDAuNTQyNCwwLjU5MzIsMC42NDAwLDAuNTgxOCwwLjYxMTEsMC44Mjc2LDAuNzA1OSwwLjc4MjgsMC44Mjc2LDAuNzYxOSwwLjc4NjIKMS4xMDM0LDAuOTU1MiwxLjA0OTIsMS4xMDM0LDAuOTU1MiwxLjA2MzksMS42NTUyLDEuNTIzOCwxLjU5MTYsMS42NTUyLDEuNDMyOCwxLjU2OTcKMi4yMDY5LDIuMDMxNywyLjEwMjAsMi4yMDY5LDEuOTEwNCwyLjEyNDAsMy4zMTAzLDMuMDQ3NiwzLjE3NDksMy4zMTAzLDIuNDAwMCwzLjA2MDQKNC40MTM4LDQuMDYzNSw0LjI1OTIsNC40MTM4LDQuMDYzNSw0LjI0NDMsNi42MjA3LDUuNDA4NSw2LjIxNDcsNi42MjA3LDYuMDk1Miw2LjIxOTkKOC44Mjc2LDYuNzM2OCw4LjQwMDcsOC44Mjc2LDcuNjQxOCw4LjUwMzUsMTMuMjQxNCwxMi4xOTA1LDEyLjUyNTMsMTMuMjQxNCwxMi4xOTA1LDEyLjgxMTIKMTUuMjgzNiwxNC40MjI1LDE1LjA3MDUsMTUuMjgzNiwxMy42NTMzLDE0Ljc3MzAsMjIuOTI1NCwxOS4yMDAwLDIyLjAyNTcsMjQuMzgxMCwyMS4zMzMzLDIyLjQxNDcKMzAuNTY3MiwyNS42MDAwLDI4Ljg4NzksMzAuNTY3MiwyOC40NDQ0LDI4LjgwMjQsNDMuMjY3Niw0MC40MjExLDQxLjQyNTEsNDUuODUwNywzOC40MDAwLDQyLjY5NzkKNjEuMTM0Myw1NC42MTMzLDU3LjA1NTcsNTcuNjkwMSw0OC43NjE5LDU1LjI3MjcsODYuNTM1Miw2NC4wMDAwLDgwLjAxMTAsODYuNTM1Miw4MC44NDIxLDg0LjM0NjgKMTE1LjM4MDMsMTAyLjQwMDAsMTEyLjM4MTIsMTE1LjM4MDMsMTAyLjQwMDAsMTEwLjM4ODcsMTYzLjg0MDAsMTUzLjYwMDAsMTYxLjEyNjMsMTczLjA3MDQsMTQ2LjI4NTcsMTYyLjI0NjAKMjMwLjc2MDYsMjA0LjgwMDAsMjE0Ljk4MzQsMjE4LjQ1MzMsMTk1LjA0NzYsMjA5Ljc2ODIsMzI3LjY4MDAsMjc5LjI3MjcsMzA0LjAwMDAsMzExLjA4ODYsMjkyLjU3MTQsMjk5LjMyMzEKMzkwLjA5NTIsMzUyLjM0NDEsMzcxLjI1MzQsMzkwLjA5NTIsMzEyLjA3NjIsMzYzLjIzOTIsNTM0LjI2MDksNDUwLjkzNTgsNDg5LjIwMjcsNDkxLjUyMDAsNDY4LjExNDMsNDgyLjYyOTUKNTc5Ljk2NDYsNTU1LjM4OTgsNTcxLjQ0NzUsNTc5Ljk2NDYsNTQxLjYxOTgsNTY5LjM1NzEsNzEyLjM0NzgsNjY4LjczNDcsNjg3Ljk0NzAsNjkyLjI4MTcsNjY4LjczNDcsNjg4LjQ1NDEKODI0LjM1MjIsNzg0Ljg2MjMsODA2LjIwNTIsODA0LjEyMjcsNzgwLjE5MDUsNzk2LjE1MzUsOTAxLjg3MTYsODg1LjYyMTYsODk5LjA0OTgsOTE4LjcyOTAsODM2LjYyOTgsODk2LjAzMDcKMTAyOC4wMTU3LDk4OS4yMjI2LDEwMDguMjQ2MiwxMDA4LjI0NjIsOTc0LjUxMzAsOTkxLjk4MDksMTA1NC4xOTg0LDEwMjkuMzYxMywxMDQ1LjA1NTgsMTA2NS42MjYwLDEwMjkuMzYxMywxMDQ0LjE3OTUKMTE2Ny42NzkzLDkwNS41MDYwLDExNDAuMTQ3OSwxMTcwLjI4NTcsMTAyNC4wMDAwLDExNDcuMzcwNywxMTU4LjIyMDksMTEyOS45MzEwLDExNTEuMzQ5MSwxMTY1LjA4NDQsMTE0My4wNjk4LDExNTcuMTQ0NgoxMjMwLjcyMzAsMTIwOC4wMzY5LDEyMTYuODkxMywxMjMyLjE2OTIsMTIwMi40OTU0LDEyMTQuNTE3NCwxMTkwLjY2MTYsMTE2NC4yMjIxLDExNzcuMzM5OCwxMTk0LjI3NzksMTE3NS41MzM2LDExODYuNTQ4NwoxMjk1LjMzNzksMTI4OC45Njg3LDEyOTMuMTUxOSwxMjk1LjMzNzksMTI4NS44MDc1LDEyOTEuMD
E1MCwxMjUwLjI4OTMsMTIzOS40NTE1LDEyNDUuMzM5NywxMjUyLjI4MDMsMTIzOS45NDAxLDEyNDYuNTYwNAoxMzE0LjAwNTAsMTI4My44Mzk2LDEyOTYuMjIyOCwxMzA3LjA0MzksMTI4MS44Nzc4LDEyOTUuMzE2OCwxMjQxLjg5ODEsMTIyNi42NDM4LDEyMzUuOTAxOSwxMjU4LjI5MTIsMTIyMS40MDQ4LDEyMzguMzM0NQoxMzI2LjI2MjEsMTI5Mi43NDI4LDEzMTAuMjc4MiwxMzI1LjIxNDUsMTI5MC4xNTgxLDEzMDUuOTgzNywxMjYyLjA3NzQsMTIxNy45NzYyLDEyMzUuOTAxOSwxMjY3LjQxNjYsMTIzMy40OTc5LDEyNDYuMTA1NgoxMzI1LjAwNTIsMTI5NC44MzgwLDEzMDguMDg5OCwxMzE2LjI3MzAsMTI5Ny44NDMwLDEzMDQuNDk3OSwxMjUxLjI4NDAsMTIxOC45NzkxLDEyMzUuNzk2NSwxMjU2LjU5NDgsMTIzNS41NTcwLDEyNDUuMjIyOQo=' df[scan][nvidia]: b'Q29weSAoTWF4KSAvIEdpQi9zLENvcHkgKE1pbikgLyBHaUIvcyxDb3B5IChBdmcpIC8gR2lCL3MsU2NhbGUgKE1heCkgLyBHaUIvcyxTY2FsZSAoTWluKSAvIEdpQi9zLFNjYWxlIChBdmcpIC8gR2lCL3MsQWRkIChNYXgpIC8gR2lCL3MsQWRkIChNaW4pIC8gR2lCL3MsQWRkIChBdmcpIC8gR2lCL3MsVHJpYWQgKE1heCkgLyBHaUIvcyxUcmlhZCAoTWluKSAvIEdpQi9zLFRyaWFkIChBdmcpIC8gR2lCL3MKMC4wMDE3LDAuMDAxNSwwLjAwMTYsMC4wMDE5LDAuMDAxNSwwLjAwMTYsMC4wMDI1LDAuMDAyMiwwLjAwMjUsMC4wMDI1LDAuMDAyMiwwLjAwMjQKMC4wMDMzLDAuMDAyMCwwLjAwMzAsMC4wMDM0LDAuMDAzMywwLjAwMzMsMC4wMDUxLDAuMDA0NSwwLjAwNDksMC4wMDU1LDAuMDA0NSwwLjAwNDkKMC4wMDY4LDAuMDA2MCwwLjAwNjEsMC4wMDY4LDAuMDA2MCwwLjAwNjYsMC4wMTAxLDAuMDA5OSwwLjAwOTksMC4wMTAxLDAuMDA2OSwwLjAwOTYKMC4wMTMyLDAuMDExOSwwLjAxMjQsMC4wMTUyLDAuMDExOSwwLjAxMzMsMC4wMjAzLDAuMDE3OSwwLjAxOTIsMC4wMjAzLDAuMDE3OSwwLjAxOTcKMC4wMjcwLDAuMDIzOCwwLjAyNTgsMC4wMjk0LDAuMDE4MiwwLjAyNTksMC4wNDU1LDAuMDM1NywwLjA0MDMsMC4wNDU1LDAuMDM1NywwLjAzODYKMC4wNTQxLDAuMDQ3NiwwLjA1MTQsMC4wNTQxLDAuMDQ3NiwwLjA1MjEsMC4wODExLDAuMDcxNCwwLjA3NjYsMC4wODExLDAuMDcxNCwwLjA3NjkKMC4xMDgxLDAuMDgwMCwwLjEwMzMsMC4xMDgxLDAuMDk1MiwwLjEwNDcsMC4xNzY1LDAuMTQyOSwwLjE1NjIsMC4xNjIyLDAuMTQyOSwwLjE1MjQKMC4yMTYyLDAuMTQ1NSwwLjE5OTUsMC4yMTYyLDAuMTkwNSwwLjIwNDksMC4zMjQzLDAuMjg1NywwLjMxMTEsMC4zMjQzLDAuMjg1NywwLjMwNjAKMC4zOTAyLDAuMzQ3OCwwLjM3OTUsMC40MjExLDAuMzgxMCwwLjM4NTMsMC42NDg2LDAuNTcxNCwwLjYyOTAsMC42NDg2LDAuNTcxNCwwLjU4MDkKMC44NjQ5LDAuNzYxOSwwLjc3NjUsMC44NjQ5LDAuNTkyNiwwLjc5MjcsMS4yNjMyLDEuMTQyOSwxLjE2MTgsMS4yNjMyLDEuMTQyOSwxLjE0ODYKMS43Mjk3LDEuNTIzOCwxLjU0MzEsMS43Mjk3LDEuNTIzOCwxLjYxNzAsMi41OTQ2LDIuMjg1NywyLjMyNjUsMi4zNDE1LDIuMjg1NywyLjI5MTUKMy4xMjIwLDMuMDQ3NiwzLjA1NTMsMy40NTk1LDMuMDQ3NiwzLjE1MDMsNS4wNTI2LDMuMjU0Miw0LjU0MzAsNS4xODkyLDQuNTcxNCw0LjY2NTAKNi4wOTUyLDYuMDk1Miw2LjA5NTIsNi45MTg5LDUuNTY1Miw2LjI1MTksMTAuMTA1Myw5LjE0MjksOS4yMzU0LDEwLjM3ODQsOS4xNDI5LDkuMjU4OQoxMi40ODc4LDEwLjI0MDAsMTIuMDM5NiwxMy44Mzc4LDEyLjE5MDUsMTIuNTE5OSwyMC43NTY4LDE4LjI4NTcsMTguNjM2MCwyMC4yMTA1LDE2LjY5NTcsMTguNDAxMAoyNC45NzU2LDIyLjI2MDksMjQuMTk5MCwyNi45NDc0LDE4LjYxODIsMjQuNDczMCw0MC40MjExLDM2LjU3MTQsMzcuMTc3MSwzNy40NjM0LDMzLjM5MTMsMzYuMjk4NQo1My44OTQ3LDQzLjU3NDUsNDcuODAzNCw0OS45NTEyLDQ4Ljc2MTksNDguODIzMSw3NC45MjY4LDczLjE0MjksNzMuMzI2Niw3NC45MjY4LDY1LjM2MTcsNzEuMzU0NQo5Ny41MjM4LDg5LjA0MzUsOTUuNjA2OSw5OS45MDI0LDY5LjQyMzcsOTQuMjE3OSwxNDkuODUzNywxMzAuNzIzNCwxNDIuMDE0NiwxNDYuMjg1NywxMzMuNTY1MiwxMzkuMzAzMQoxOTUuMDQ3NiwxNzQuMjk3OSwxODAuNzc1OCwxOTkuODA0OSwxNzguMDg3MCwxODYuODUyMywyOTIuNTcxNCwyMjcuNTU1NiwyNjguNjY3NCwyOTIuNTcxNCwyNDAuOTQxMiwyNjMuNTEyNAozNTYuMTczOSwzMjcuNjgwMCwzNDMuNTkzOCwzNTYuMTczOSwzMjcuNjgwMCwzNTAuMTY0Miw1MzQuMjYwOSwyNTYuMDAwMCw0OTMuNTk4Myw1MzQuMjYwOSw0ODEuODgyNCw0OTcuODA4MQo2NDIuNTA5OCw1OTUuNzgxOCw2MDMuMjg2OCw2NTUuMzYwMCw1OTUuNzgxOCw2MjMuMjE1Miw5NjMuNzY0Nyw3MzMuNjExOSw4ODUuMjAxOSw5MTAuMjIyMiw4MzMuMDg0Nyw4OTIuODE4NAoxMDQwLjI1NDAsOTYzLjc2NDcsOTkyLjk2OTcsMTA0MC4yNTQwLDk2My43NjQ3LDk4Ni42NzUxLDE0NjcuMjIzOSwxMzY1LjMzMzMsMTQzMC4xNTAxLDE0NjcuMjIzOSwxMjI4LjgwMDAsMTQwNS4zOTk1CjEwNzQuMzYwNyw5NzguMTQ5MywxMDMwLjM1NTAsOTQ5Ljc5NzEsODY4LjAyNjUsOTA5LjU1NzMsMTA0MC4yNTQwLDkwMS4
4NzE2LDk3Ny4zODE1LDEwMTguNjk0Myw5NTQuNDA3OCw5ODcuNzE4NwoxMTM0LjgyMjUsMTA3OC43ODE5LDExMDUuNjAxOCwxMDc4Ljc4MTksMTA0MC4yNTQwLDEwNTIuNTY0NywxMTA0LjUzOTMsMTA2NS42MjYwLDEwODMuNzExMSwxMTI5LjkzMTAsMTEwNC41MzkzLDExMTguMDk0MAoxMTkxLjU2MzYsMTE2Ny42NzkzLDExODUuMTg0MSwxMTcwLjI4NTcsMTE0Ny4yMzg1LDExNTYuOTY1NCwxMTk1LjE4NTQsMTE3Mi4wMjk4LDExODIuMzIzOCwxMjE3LjM4NzAsMTE5NS4xODU0LDEyMDcuOTM5MgoxMjQ5Ljc5MjYsMTIzMC43MjMwLDEyNDEuMDcyOSwxMjI2LjQwNDcsMTIxMy42Mjk2LDEyMTkuODcxNywxMjQyLjM4ODYsMTIzNy41MDEyLDEyNDAuMDE3MywxMjU4LjI5MTIsMTI0OS4yOTYzLDEyNTMuMjc4MQoxMjcyLjU0MzcsMTI2Mi41ODQwLDEyNjcuMDAwMiwxMjU5LjU1MDgsMTI1Mi43NzkwLDEyNTUuNzQwMSwxMjc1LjYzOTksMTI2OC45NTA0LDEyNzIuMzgxMSwxMjc5Ljc5MTcsMTI3Ny43MTI0LDEyNzguNjk2NQoxMjgyLjI2OTYsMTI3OC43NTEyLDEyNzkuOTYzMCwxMjc3LjE5MzcsMTI3Mi4xNTc3LDEyNzQuNzIxNywxMjg5Ljc2MTQsMTI4Ni41OTYzLDEyODguNTkzNSwxMjkzLjIwNzgsMTI4OC43MDQ2LDEyOTEuNzgyNAoxMjg4Ljc3MDYsMTI4Ni4yMDE4LDEyODcuNzYwNiwxMjg2Ljk5MTEsMTI4My42NDMyLDEyODUuNTU4NiwxMjk4LjE0NDIsMTI5NC4yNzIwLDEyOTYuNjIzNSwxMjk5Ljg4NzYsMTI5NS44NzE1LDEyOTguMTA5MAoxMjk0LjQzODQsMTI4OS44NjA1LDEyOTEuODE1NSwxMjkzLjYzOTksMTI4OS4wNjc3LDEyOTAuNTQ0NiwxMzAyLjM3NjYsMTI2OC43NTg1LDEyOTIuODQ3NywxMzAyLjcxMzcsMTI2OS4yNzA0LDEyOTMuMTA5OQ=='
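To recover a DataFrame from one of the Base64 blobs above, invert the procedure; a minimal sketch reusing the last blob still held in `b64_content` (the names `csv_text` and `df_restored` are illustrative):
import io
csv_text = base64.b64decode(b64_content).decode('utf-8')  # or paste one of the printed blobs instead
df_restored = pd.read_csv(io.StringIO(csv_text))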