#!/usr/bin/env python
# coding: utf-8

# # Jungfrau 16M geometry reconstruction
#
# This notebook contains a few tools to rebuild the 2D images from the raw data, apply the pedestal correction and so on.

# In[1]:

# Check that the HDF5 bitshuffle compression plugin works...
import h5py
import numpy
import bitshuffle.h5
print(h5py.__version__)  # >= '2.5.0'

f = h5py.File("filename.h5", "w")
# block_size = 0 lets Bitshuffle choose its value
block_size = 0
dataset = f.create_dataset(
    "data",
    (100, 100, 100),
    compression=bitshuffle.h5.H5FILTER,
    compression_opts=(block_size, bitshuffle.h5.H5_COMPRESS_LZ4),
    dtype='float32',
    )
# create some random data
array = numpy.random.rand(100, 100, 100)
array = array.astype('float32')
dataset[:] = array
f.close()


# In[2]:

import bitshuffle, os
print(os.environ.get("HDF5_PLUGIN_PATH"))
import numpy, scipy, pyFAI, h5py, bitshuffle.h5
print(h5py.version.hdf5_version_tuple)
get_ipython().run_line_magic('matplotlib', 'nbagg')
from matplotlib.pyplot import subplots


# In[42]:

prefix = '/scratch/kieffer/JF16/'
ped_file = prefix + 'pedestal_20190324_0846.JF07T32V01.h5'
gain_file = prefix + 'gains_201810.h5'
img_files = prefix + "lysoembl_32_1153.JF07T32V01.h5"
#%time ref = calc_pedestal(ped_file, 0, 100)
with h5py.File(gain_file, "r") as h:
    gain = h["gains"][...]


# In[4]:

gains = gain.astype("float32")
print(gain[0])
print(gain[1])


# In[158]:

get_ipython().run_cell_magic('time', '', 'with h5py.File(ped_file, mode="r") as h:\n    k = list(h["data"].keys())[0]\n    ds = h["data/%s/data"%k][:, 50, 60]\n')


# In[159]:

print(ds.shape)
fig, ax = subplots()
ax.hist(ds, 100)


# In[160]:

fig, ax = subplots()
ax.plot(ds)


# In[161]:

dgain = (ds>>14)
M1 = dgain == 0
M2 = dgain == 1
M3 = dgain == 2
M4 = dgain == 3


# In[162]:

print(M1.sum(), M2.sum(), M3.sum(), M4.sum())


# In[163]:

fig, ax = subplots(3)
ax[0].hist(ds[M1], 100)
ax[1].hist(ds[M2]&((1<<14)-1), 100)
ax[2].hist(ds[M4]&((1<<14)-1), 100)


# In[11]:

def plot_distrib(fn, pix0=0, pix1=0):
    "Plot the histogram of the raw values of one pixel, split by gain stage."
    with h5py.File(fn, "r") as h:
        k = list(h["data"].keys())[0]
        ds = h["data/%s/data"%k][slice(None), pix0, pix1]
    dgain = (ds>>14)
    M1 = dgain == 0
    M2 = dgain == 1
    M4 = dgain == 3
    fig, ax = subplots(3)
    ax[0].hist(ds[M1], 100)
    ax[1].hist(ds[M2]&((1<<14)-1), 100)
    ax[2].hist(ds[M4]&((1<<14)-1), 100)
    return ax


# In[12]:

plot_distrib(ped_file, 10, 100)


# In[13]:

fig, ax = subplots()
ax.plot(numpy.arcsinh(ds))


# In[14]:

get_ipython().run_line_magic('load_ext', 'Cython')
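# In[ ]:

# Quick sanity check, not part of the original analysis: the Jungfrau packs the gain
# stage in the two most significant bits of every 16-bit word and the ADU value in the
# lower 14 bits. A tiny synthetic example of that decoding (the same bit masks are used
# by the accumulators below):
demo = numpy.array([100, (1 << 14) + 200, (3 << 14) + 300], dtype=numpy.uint16)
gain_id = demo >> 14                # gain stage: 0, 1 or 3
adu = demo & ((1 << 14) - 1)        # 14-bit raw value
print(gain_id, adu)                 # expected: [0 1 3] [100 200 300]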
# In[15]:

get_ipython().run_cell_magic('cython', '--compile-args=-fopenmp --link-args=-fopenmp', '\nimport cython\nimport h5py\nimport numpy\ncimport numpy as cnumpy\nfrom cython.parallel import prange\n\ncdef class StatAccumulator:\n    cdef:\n        readonly int bits\n        readonly int frames\n        readonly int height\n        readonly int width\n        readonly cnumpy.uint32_t[:, :, ::1] count  # gain is first index\n        readonly cnumpy.float64_t[:, :, ::1] sum, M2\n\n    def __cinit__(self, shape, int bits=14):\n        self.bits = bits\n        ngains = 1<<(16-bits)\n        self.frames = 0\n        self.height = shape[0]\n        self.width = shape[-1]\n        self.count = numpy.zeros([ngains, self.height, self.width], dtype=numpy.uint32)\n        self.sum = numpy.zeros([ngains, self.height, self.width], dtype=numpy.float64)\n        self.M2 = numpy.zeros([ngains, self.height, self.width], dtype=numpy.float64)\n\n    def __dealloc__(self):\n        self.count = None\n        self.sum = None\n        self.M2 = None\n\n    @cython.wraparound(False)\n    @cython.boundscheck(False)\n    @cython.initializedcheck(False)\n    @cython.cdivision(True)\n    def feed(self, cnumpy.uint16_t[:, ::1] frame):\n        cdef:\n            int gain, i, j, mask\n            int value, trimmed, cnt\n            double to_store, sm, delta\n\n        mask = (1<<self.bits) - 1\n        self.frames += 1\n        for i in prange(self.height, nogil=True):\n            for j in range(self.width):\n                value = frame[i, j]\n                gain = value>>self.bits\n                trimmed = value & mask\n                to_store = trimmed\n                cnt = self.count[gain, i, j] + 1\n                self.count[gain, i, j] = cnt\n                sm = self.sum[gain, i, j] + to_store\n                self.sum[gain, i, j] = sm\n                delta = (sm/cnt) - to_store\n                self.M2[gain, i, j] = self.M2[gain, i, j] + delta*delta*(cnt-1)/cnt\n\n    def stats(self):\n        mean = numpy.asarray(self.sum)/numpy.asarray(self.count)\n        std = numpy.sqrt(numpy.asarray(self.M2)/(numpy.asarray(self.count)-1))\n        return mean, std\n\n\n@cython.wraparound(False)\n@cython.boundscheck(False)\n@cython.initializedcheck(False)\n@cython.cdivision(True)\ndef correct_frame(cnumpy.uint16_t[:, ::1] raw,\n                  cython.floating[:, :, ::1] gain,\n                  cython.floating[:, :, ::1] pedestal):\n    cdef:\n        int n, m, g, i, j, value, mask, nbits=14\n        cnumpy.float32_t[:, ::1] result\n        cython.floating gain_value\n    m = raw.shape[0]\n    n = raw.shape[1]\n    mask = (1<<nbits) - 1\n    result = numpy.zeros((m, n), dtype=numpy.float32)\n    for i in prange(m, nogil=True):\n        for j in range(n):\n            value = raw[i, j]\n            g = value >> nbits\n            if g == 3:\n                g = 2\n            gain_value = gain[g, i, j]\n            result[i, j] = ((value&mask) - pedestal[g, i, j]) / gain_value if gain_value else 0.0\n    return numpy.asarray(result)\n')


# In[16]:

def stat_stack(filename, start=0, stop=-1):
    "A tool to perform some statistical analysis on a stack of frames"
    with h5py.File(filename, mode="r") as h:
        k = list(h["data"].keys())[0]
        ds = h["data/%s/data"%k]
        acc = StatAccumulator(ds.shape[-2:])
        if stop == -1:
            stop = ds.shape[0]
        for i in range(start, stop):
            acc.feed(ds[i])
    return acc.stats()


# In[17]:

get_ipython().run_line_magic('time', 'slices1k = stat_stack(ped_file, 0, 1500)')
fig, ax = subplots()
ax.imshow(slices1k[1][0].reshape(-1, 4096))
numpy.nanmean(slices1k[1][0])


# In[18]:

get_ipython().run_line_magic('time', 'slices2k = stat_stack(ped_file, 2000, 2500)')


# In[19]:

fig, ax = subplots()
ax.imshow(slices2k[0][0].reshape(-1, 4096))
numpy.nanmean(slices2k[0][0])


# In[20]:

get_ipython().run_line_magic('time', 'slices3k = stat_stack(ped_file, 2600, 3000)')
fig, ax = subplots()
ax.imshow(slices3k[1][1].reshape(-1, 4096))
numpy.nanmean(slices3k[1][1])


# In[21]:

get_ipython().run_line_magic('time', 'slices4k = stat_stack(ped_file, 3110, 3500)')
fig, ax = subplots()
ax.imshow(slices4k[1][3].reshape(-1, 4096))
numpy.nanmean(slices4k[1][3])


# In[22]:

slices4k[0].shape


# In[23]:

fig, ax = subplots(2, 2)
for i, a in enumerate(ax.flat):
    a.imshow(slices4k[0][i].reshape(-1, 4096))
    a.set_title("Mean channel %i" % i)


# In[24]:

get_ipython().run_cell_magic('time', '', 'pedestal_mean = numpy.zeros_like(slices4k[0])\npedestal_mean[0] = slices2k[0][0]\npedestal_mean[1] = slices3k[0][1]\npedestal_mean[2] = slices4k[0][3]\n\npedestal_std = numpy.zeros_like(slices4k[1])\npedestal_std[0] = slices2k[1][0]\npedestal_std[1] = slices3k[1][1]\npedestal_std[2] = slices4k[1][3]\n\nwith h5py.File(prefix+"pedestal_20190324_avg_bs.h5", "w") as f:\n    f.create_dataset("mean",\n                     pedestal_mean.shape,\n                     compression=bitshuffle.h5.H5FILTER,\n                     compression_opts=(0, bitshuffle.h5.H5_COMPRESS_LZ4),\n                     dtype=\'float32\',\n                     data=pedestal_mean)\n    f.create_dataset("std",\n                     pedestal_std.shape,\n                     compression=bitshuffle.h5.H5FILTER,\n                     compression_opts=(0, bitshuffle.h5.H5_COMPRESS_LZ4),\n                     dtype=\'float32\',\n                     data=pedestal_std)\n    f.create_dataset("gain",\n                     gain.shape,\n                     compression=bitshuffle.h5.H5FILTER,\n                     compression_opts=(0, bitshuffle.h5.H5_COMPRESS_LZ4),\n                     dtype=\'float32\',\n                     data=gain)\n')
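# In[ ]:

# For reference only, not in the original notebook: a plain NumPy version of the
# gain/pedestal correction that the compiled implementations in this notebook
# (Cython above, numba and OpenCL below) perform per pixel. It is slower and more
# memory hungry, but handy for cross-checking a few frames.
def correct_frame_numpy(frame, gain, pedestal):
    "Sketch: frame is a uint16 image, gain/pedestal are (ngains, height, width) arrays."
    g = (frame >> 14).astype(numpy.intp)
    numpy.minimum(g, 2, out=g)                      # gain stage 3 uses the third calibration
    value = (frame & ((1 << 14) - 1)).astype(numpy.float32)
    idx_i, idx_j = numpy.ogrid[:frame.shape[0], :frame.shape[1]]
    gg = gain[g, idx_i, idx_j]
    corrected = (value - pedestal[g, idx_i, idx_j]) / numpy.where(gg == 0, 1, gg)
    return numpy.where(gg == 0, 0, corrected).astype(numpy.float32)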
# In[25]:

import numpy as np
from numba import jit

@jit(nopython=True, nogil=True, cache=True)
def do_corrections(m, n, image, G, P, pede_mask, mask, mask2):
    gain_mask = np.bitwise_and(np.right_shift(image, 14), mask2)
    data = np.bitwise_and(image, mask)
    res = np.empty((m, n), dtype=np.float32)
    for i in range(m):
        for j in range(n):
            if pede_mask[i][j] != 0:
                res[i][j] = 0
                continue
            gm = gain_mask[i][j]
            if gm == 3:
                gm = 2
            res[i][j] = (data[i][j] - P[gm][i][j]) / G[gm][i][j]
    return res


# In[26]:

with h5py.File(prefix+"pedestal_20190324_avg_bs.h5", "r") as h:
    pedestal_mean = h["mean"][...]
    pedestal_std = h["std"][...]
    pedestal_gain = h["gain"][...]

with h5py.File(img_files, "r") as f:
    k = list(f["data"].keys())[0]
    raw = f["data/%s/data"%k][...]
print(raw.shape)


# In[27]:

#ls /mntdirect/_scisoft/users/jupyter/jupy35/lib/python3.5/site-packages/bitshuffle/plugin


# In[28]:

#%load_ext Cython


# In[29]:

print(raw.shape)
print(gain.shape)
print(pedestal_mean.shape)
get_ipython().run_line_magic('timeit', 'correct_frame(raw[0], gain, pedestal_mean)')


# In[30]:

get_ipython().run_line_magic('matplotlib', 'nbagg')
from matplotlib.pylab import subplots
import numpy


# In[31]:

fig, ax = subplots()
img = correct_frame(raw[250], gain, pedestal_mean)
ax.imshow(img.reshape(-1, 4096).clip(0, 100))


# In[33]:

import bitshuffle.h5
with h5py.File("/scratch/kieffer/JF16/lysoembl_32_1153_intensity_corrected_bs.h5", "w") as h5:
    data_grp = h5.require_group("data")
    ds = data_grp.require_dataset("data",
                                  shape=raw.shape,
                                  chunks=(1,)+raw.shape[1:],
                                  compression=bitshuffle.h5.H5FILTER,
                                  compression_opts=(0, bitshuffle.h5.H5_COMPRESS_LZ4),
                                  dtype=numpy.float32)
    print(ds.shape)
    print(ds.dtype)
    for idx, frame in enumerate(raw):
        ds[idx] = correct_frame(frame, gain, pedestal_mean)


# In[86]:

# Pedestal calculation using the GPU!
import pyopencl
from pyopencl import array as cla
ctx = pyopencl.create_some_context(interactive=True)
queue = pyopencl.CommandQueue(ctx)
shape = 16*1024, 1024
frame_d = cla.empty(queue, shape[-2:], dtype="uint16")
corrected_d = cla.empty(queue, shape[-2:], dtype="float32")
icorrected_d = cla.empty(queue, shape[-2:], dtype="uint32")
pedestal_d = cla.to_device(queue, pedestal_mean.astype("float32"))
error_d = cla.to_device(queue, pedestal_std.astype("float32"))
gains_d = cla.to_device(queue, gain.astype("float32"))
bits = 14
ngains = 1<<(16-bits)
print((ngains, shape[-2], shape[-1]))
count_d = cla.empty(queue, (ngains, shape[-2], shape[-1]), dtype="uint32")
sum_d = cla.empty(queue, (ngains, shape[-2], shape[-1]), dtype="float32")
M2_d = cla.empty(queue, (ngains, shape[-2], shape[-1]), dtype="float32")
get_ipython().run_line_magic('load_ext', 'pyopencl.ipython_ext')
ctx


# In[87]:

frame_d.dtype, corrected_d.dtype, pedestal_d.dtype, error_d.dtype, gains_d.dtype
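# In[ ]:

# Alternative sketch, not used in the original notebook: the same "simple" correction can
# be expressed with pyopencl's ElementwiseKernel instead of the hand-written kernels of
# the next cell. The gain/pedestal arrays are assumed to be laid out as (ngains, size),
# flattened, as above; the name `correct_simple` is arbitrary.
from pyopencl.elementwise import ElementwiseKernel
correct_simple = ElementwiseKernel(
    ctx,
    "unsigned short *raw, float *gain, float *pedestal, float *result, unsigned int size",
    "unsigned int g = raw[i] >> 14;"
    " g = (g == 3) ? 2 : g;"
    " float value = raw[i] & ((1 << 14) - 1);"
    " float gv = gain[g * size + i];"
    " result[i] = (gv != 0.0f) ? (value - pedestal[g * size + i]) / gv : 0.0f;",
    "correct_simple")
# usage sketch: correct_simple(frame_d, gains_d, pedestal_d, corrected_d, numpy.uint32(frame_d.size))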
# In[88]:

get_ipython().run_cell_magic('cl_kernel', '', '\n#define BITS_MANTISSA_FLOAT 23\n\ninline float truncate_precision32(float value,\n                                  uchar prec_bits)\n{\n    union{ float f; uint u;} work;\n    work.f = value;\n    if (prec_bits < BITS_MANTISSA_FLOAT)\n    {\n        int zeroed_bits = BITS_MANTISSA_FLOAT - prec_bits;\n        uint mask = ~((1 << zeroed_bits) - 1);\n        work.u &= mask;\n    }\n    return work.f;\n}\n\ninline float correct_pedestal(global ushort *raw,\n                              global float *gain,\n                              global float *pedestal,\n                              uint size)\n{\n    uint idx = get_global_id(0);\n    float result = 0.0f;\n    if (idx<size)\n    {\n        int value = raw[idx];\n        int g = value>>14;\n        value &= (1<<14)-1;\n        g = (g==3)?2:g;\n        uint read_at = g*size + idx;\n        float gain_value = gain[read_at];\n        if (gain_value!=0.0f)\n            result = (value - pedestal[read_at]) / gain_value;\n    }\n    return result;\n}\n\ninline float correct_pedestal_cutoff(global ushort *raw,\n                                     global float *gain,\n                                     global float *pedestal,\n                                     global float *error,\n                                     uint size,\n                                     uint nsigma)\n{\n    uint idx = get_global_id(0);\n    float result = 0.0f;\n    if (idx<size)\n    {\n        int value = raw[idx];\n        int g = value>>14;\n        value &= (1<<14)-1;\n        g = (g==3)?2:g;\n        uint read_at = g*size + idx;\n        result = value - pedestal[read_at];\n        if (g==0 && result<nsigma*error[read_at])\n            result = 0.0f;\n        else\n        {\n            float gain_value = gain[read_at];\n            result = (gain_value!=0.0f) ? result/gain_value : 0.0f;\n        }\n    }\n    return result;\n}\n\nkernel void ocl_pedestal_simple(global ushort *raw,\n                                global float *gain,\n                                global float *pedestal,\n                                global float *result,\n                                uint size)\n{\n    uint idx = get_global_id(0);\n    if (idx<size)\n        result[idx] = correct_pedestal(raw, gain, pedestal, size);\n}\n\nkernel void ocl_pedestal_cutoff(global ushort *raw,\n                                global float *gain,\n                                global float *pedestal,\n                                global float *error,\n                                global float *result,\n                                uint size,\n                                uint nsigma)\n{\n    uint idx = get_global_id(0);\n    if (idx<size)\n        result[idx] = correct_pedestal_cutoff(raw, gain, pedestal, error, size, nsigma);\n}\n')


# In[ ]:

fig, ax = subplots()
logain = [((i>>14)>0).sum() for i in raw]
print(logain)
ax.plot(logain)
ax.set_xlabel("frame #")
ax.set_ylabel("Number of non-zero pixels")
ax.set_title("NNZ data per frame")


# # Geometry description ...

# In[ ]:

get_ipython().run_line_magic('ls', '')


# In[ ]:

get_ipython().system('cat JF16M.geom')


# In[ ]:

from collections import OrderedDict

class JungFrauModule:
    shape = (512, 1024)
    def __init__(self, name, start_x):
        pass

def parse_geometry(fn):
    "Parse a CrystFEL-style .geom file into an OrderedDict of per-module key/value dicts."
    modules = OrderedDict()
    for line in open(fn):
        if line.startswith(";"):
            continue
        elif not line.strip():
            continue
        elif "=" in line and line.startswith("m"):
            lhs, rhs = line.split("=", 1)
            try:
                m, a = lhs.split("/", 1)
            except ValueError:
                print(lhs)
                continue
            if m not in modules:
                modules[m] = OrderedDict()
            module = modules[m]
            module[a.strip()] = rhs.strip()
    return modules


# In[ ]:

modules = parse_geometry("JF16M.geom")


# In[ ]:

fig, ax = subplots()
xmin = 10000
xmax = -10000
ymin = 10000
ymax = -10000
for name, module in modules.items():
    # print(name, module)
    x = float(module["corner_x"])
    y = float(module["corner_y"])
    xmax = max(x, xmax)
    xmin = min(x, xmin)
    ymax = max(y, ymax)
    ymin = min(y, ymin)
    ax.annotate(name, xy=(x, y), xytext=(x + 10, y + 10),
                # weight="bold",
                size="large", color="black",
                arrowprops=dict(facecolor='red', edgecolor='red'))
ax.set_ylim(ymin, ymax)
ax.set_xlim(xmin, xmax)


# In[ ]:

sds = numpy.ones((512, 1024), dtype=int)
sds[255, :] = 2
sds[256, :] = 2
sdp = numpy.cumsum(sds, axis=0)
print(sdp[-1, -1])


# In[ ]:

fds = numpy.ones((512, 1024), dtype=int)
fds[:, 127] = 2
fds[:, 128] = 2
fds[:, 255] = 2
fds[:, 256] = 2
fds[:, 383] = 2
fds[:, 384] = 2
fdp = numpy.cumsum(fds, axis=1)
print(fdp[-1, -1])


# In[ ]:

get_ipython().run_line_magic('ls', '/scisoft/users/kieffer/workspace/pyFAI/testimages/')


# In[ ]:

import pyFAI, fabio
ai = pyFAI.load("/scisoft/users/kieffer/workspace/pyFAI/testimages/Pilatus6M.poni")
img = fabio.open("/scisoft/users/kieffer/workspace/pyFAI/testimages/Pilatus6M.cbf").data
from pyFAI.gui import jupyter
jupyter.display(img)


# In[ ]:

amorphous, bragg = ai.separate(img)


# In[ ]:

jupyter.display(bragg[500:1000, 500:1000])


# In[ ]:

jupyter.display(amorphous[500:1000, 500:1000])
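# In[ ]:

# Side note, not in the original notebook: separate() splits the image into an isotropic
# (amorphous) background and a Bragg component. Counting the pixels where the Bragg part
# clearly dominates (the +10 ADU margin is arbitrary) gives a crude peak-pixel count, in
# the same spirit as the nb_bragg loop at the end of this notebook.
bragg_pixels = int((bragg > amorphous + 10).sum())
print("Pixels dominated by Bragg signal:", bragg_pixels)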
# In[ ]:

# Pedestal calculation using the GPU!
import pyopencl
from pyopencl import array as cla
ctx = pyopencl.create_some_context(interactive=True)
queue = pyopencl.CommandQueue(ctx)
shape = 16*1024, 1024
frame_d = cla.empty(queue, shape[-2:], dtype="uint16")
bits = 14
ngains = 1<<(16-bits)
print((ngains, shape[-2], shape[-1]))
count_d = cla.empty(queue, (ngains, shape[-2], shape[-1]), dtype="uint32")
sum_d = cla.empty(queue, (ngains, shape[-2], shape[-1]), dtype="float32")
M2_d = cla.empty(queue, (ngains, shape[-2], shape[-1]), dtype="float32")
get_ipython().run_line_magic('load_ext', 'pyopencl.ipython_ext')
ctx


# In[ ]:

get_ipython().run_cell_magic('cl_kernel', '', 'kernel void memset_uint32(global unsigned int *ary, const int ngains, const int height, const int width)\n{\n    uint col = get_global_id(0);\n    uint line = get_global_id(1);\n    if ((col<width) && (line<height))\n    {\n        for (int g=0; g<ngains; g++)\n            ary[(g*height + line)*width + col] = 0;\n    }\n}\n\nkernel void memset_float32(global float *ary, const int ngains, const int height, const int width)\n{\n    uint col = get_global_id(0);\n    uint line = get_global_id(1);\n    if ((col<width) && (line<height))\n    {\n        for (int g=0; g<ngains; g++)\n            ary[(g*height + line)*width + col] = 0.0f;\n    }\n}\n\nkernel void feed(global ushort *raw,\n                 const uint bits,\n                 const uint ngains,\n                 const int height,\n                 const int width,\n                 global float *sum,\n                 global float *M2,\n                 global unsigned int *count)\n{\n    uint col = get_global_id(0);\n    uint line = get_global_id(1);\n    if ((col<width) && (line<height))\n    {\n        uint pos = line*width + col;\n        ushort value = raw[pos];\n        ushort gain = value>>bits;\n        if (gain==3) gain=2;\n        ushort mask = ((1<<bits)-1);\n        float data = (float)(value & mask);\n        uint read_at = gain*height*width + pos;\n        uint cnt = count[read_at] + 1;\n        count[read_at] = cnt;\n        float sm = sum[read_at] + data;\n        sum[read_at] = sm;\n        float delta = (sm/cnt) - data;\n        M2[read_at] += delta*delta*(cnt-1)/cnt;\n    }\n}\n\nkernel void ocl_pedestal(global ushort *raw,\n                         global float *gain,\n                         global float *pedestal,\n                         global float *result,\n                         const uint size)\n{\n    uint idx = get_global_id(0);\n    if (idx<size)\n    {\n        int value = raw[idx];\n        int g = value>>14;\n        value &= (1<<14)-1;\n        g = (g==3)?2:g;\n        uint read_at = g*size + idx;\n        float gain_value = gain[read_at];\n        result[idx] = (gain_value==0.0f)?0.0f:(value - pedestal[read_at]) / gain_value;\n    }\n}\n')


# In[ ]:

get_ipython().run_cell_magic('timeit', '', 'W=1024\nevt=memset_float32(queue, (shape[-1], shape[-2]), (W, 1), sum_d.data,\n                   numpy.int32(ngains), numpy.int32(shape[-2]), numpy.int32(shape[-1]))\nevt=memset_uint32(queue, (shape[-1], shape[-2]), (W, 1), count_d.data,\n                  numpy.int32(ngains), numpy.int32(shape[-2]), numpy.int32(shape[-1]))\nevt=memset_float32(queue, (shape[-1], shape[-2]), (W, 1), M2_d.data,\n                   numpy.int32(ngains), numpy.int32(shape[-2]), numpy.int32(shape[-1]))\n\nevt.wait()\n')


# In[ ]:

get_ipython().run_cell_magic('time', '', 'filename = ped_file\nstart = 1535\nstop = -1\ncnt = 0\nW=1024\nprint(filename)\nevt=memset_float32(queue, (shape[-1], shape[-2]), (W, 1), sum_d.data,\n                   numpy.int32(ngains), numpy.int32(shape[-2]), numpy.int32(shape[-1]))\nevt=memset_uint32(queue, (shape[-1], shape[-2]), (W, 1), count_d.data,\n                  numpy.int32(ngains), numpy.int32(shape[-2]), numpy.int32(shape[-1]))\nevt=memset_float32(queue, (shape[-1], shape[-2]), (W, 1), M2_d.data,\n                   numpy.int32(ngains), numpy.int32(shape[-2]), numpy.int32(shape[-1]))\n\nwith h5py.File(filename, mode="r") as h:\n    k = list(h["data"].keys())[0]\n    ds = h["data/%s/data"%k]\n    print(ds.shape)\n    if stop == -1:\n        stop = ds.shape[0]\n    print(start, stop)\n    for i in range(start, stop):\n        frame_d.set(ds[i])\n        cnt += 1\n        feed(queue, (shape[-1], shape[-2]), (W, 1),\n             frame_d.data,\n             numpy.uint32(bits),\n             numpy.uint32(ngains),\n             numpy.int32(shape[-2]),\n             numpy.int32(shape[-1]),\n             sum_d.data,\n             M2_d.data,\n             count_d.data)\nprint(cnt)\n')


# In[ ]:

pedestal = (sum_d.get()/count_d.get()).astype("float32")
print(pedestal.shape, pedestal.dtype)


# ## Pedestal correction
#
# With implementation on CPU (parallel Cython) and GPU (OpenCL)

# In[ ]:

with h5py.File(img_files, "r") as f:
    k = list(f["data"].keys())[0]
    raw = f["data/%s/data"%k][...]
gain = gain.astype("float32")
print(raw.shape, raw.dtype)
print(gain.shape, gain.dtype)
# with h5py.File("pedestal_20190324_avg_bs2.h5", "r") as f:
#     pedestal = f["data/mean"][...]
#     print(pedestal.shape, pedestal.dtype)
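# In[ ]:

# Not in the original notebook: the accumulators filled on the GPU above also give the
# pedestal standard deviation, mirroring StatAccumulator.stats() on the device arrays;
# the name pedestal_std_gpu is introduced here for illustration only.
counts = count_d.get().astype(numpy.float64)
pedestal_std_gpu = numpy.sqrt(M2_d.get() / numpy.maximum(counts - 1, 1)).astype("float32")
print(pedestal_std_gpu.shape, numpy.nanmean(pedestal_std_gpu[0]))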
# In[ ]:

get_ipython().run_line_magic('load_ext', 'Cython')


# In[ ]:

get_ipython().run_cell_magic('cython', '--compile-args=-fopenmp --link-args=-fopenmp', '\nimport cython\nimport h5py\nimport numpy\ncimport numpy as cnumpy\nfrom cython.parallel import prange\n\ncdef class StatAccumulator:\n    cdef:\n        readonly int bits\n        readonly int frames\n        readonly int height\n        readonly int width\n        readonly cnumpy.uint32_t[:, :, ::1] count  # gain is first index\n        readonly cnumpy.float64_t[:, :, ::1] sum, M2\n\n    def __cinit__(self, shape, int bits=14):\n        self.bits = bits\n        ngains = 1<<(16-bits)\n        self.frames = 0\n        self.height = shape[0]\n        self.width = shape[-1]\n        self.count = numpy.zeros([ngains, self.height, self.width], dtype=numpy.uint32)\n        self.sum = numpy.zeros([ngains, self.height, self.width], dtype=numpy.float64)\n        self.M2 = numpy.zeros([ngains, self.height, self.width], dtype=numpy.float64)\n\n    def __dealloc__(self):\n        self.count = None\n        self.sum = None\n        self.M2 = None\n\n    @cython.wraparound(False)\n    @cython.boundscheck(False)\n    @cython.initializedcheck(False)\n    @cython.cdivision(True)\n    def feed(self, cnumpy.uint16_t[:, ::1] frame):\n        cdef:\n            int gain, i, j, mask\n            int value, trimmed, cnt\n            double to_store, sm, delta\n\n        mask = (1<<self.bits) - 1\n        self.frames += 1\n        for i in prange(self.height, nogil=True):\n            for j in range(self.width):\n                value = frame[i, j]\n                gain = value>>self.bits\n                trimmed = value & mask\n                to_store = trimmed\n                cnt = self.count[gain, i, j] + 1\n                self.count[gain, i, j] = cnt\n                sm = self.sum[gain, i, j] + to_store\n                self.sum[gain, i, j] = sm\n                delta = (sm/cnt) - to_store\n                self.M2[gain, i, j] = self.M2[gain, i, j] + delta*delta*(cnt-1)/cnt\n\n    def stats(self):\n        mean = numpy.asarray(self.sum)/numpy.asarray(self.count)\n        std = numpy.sqrt(numpy.asarray(self.M2)/(numpy.asarray(self.count)-1))\n        return mean, std\n\n\n@cython.wraparound(False)\n@cython.boundscheck(False)\n@cython.initializedcheck(False)\n@cython.cdivision(True)\ndef correct_frame(cnumpy.uint16_t[:, ::1] raw,\n                  cython.floating[:, :, ::1] gain,\n                  cython.floating[:, :, ::1] pedestal):\n    cdef:\n        int n, m, g, i, j, value, mask, nbits=14\n        cnumpy.float32_t[:, ::1] result\n        cython.floating gain_value\n    m = raw.shape[0]\n    n = raw.shape[1]\n    mask = (1<<nbits) - 1\n    result = numpy.zeros((m, n), dtype=numpy.float32)\n    for i in prange(m, nogil=True):\n        for j in range(n):\n            value = raw[i, j]\n            g = value >> nbits\n            if g == 3:\n                g = 2\n            gain_value = gain[g, i, j]\n            result[i, j] = ((value&mask) - pedestal[g, i, j]) / gain_value if gain_value else 0.0\n    return numpy.asarray(result)\n')


# In[ ]:

get_ipython().run_cell_magic('time', '', 'with h5py.File(prefix+"lysoembl_32_1153_intensity_cor_gzip.h5", "w") as h5:\n    data_grp = h5.require_group("data")\n    ds = data_grp.require_dataset("data",\n                                  shape=raw.shape,\n                                  chunks=(1,)+raw.shape[1:],\n                                  compression="gzip",\n                                  compression_opts=2,\n                                  shuffle=True,\n                                  dtype=numpy.float32)\n    for idx, frame in enumerate(raw):\n        ds[idx] = correct_frame(frame, gain, pedestal)\n')


# In[ ]:

get_ipython().run_cell_magic('time', '', 'import bitshuffle.h5\nwith h5py.File(prefix+"lysoembl_32_1153_intensity_cor_bslz4.h5", "w") as h5:\n    data_grp = h5.require_group("data")\n    ds = data_grp.require_dataset("data",\n                                  shape=raw.shape,\n                                  chunks=(1,)+raw.shape[1:],\n                                  compression=bitshuffle.h5.H5FILTER,\n                                  compression_opts=(0, bitshuffle.h5.H5_COMPRESS_LZ4),\n                                  dtype=numpy.float32)\n    for idx, frame in enumerate(raw):\n        ds[idx] = correct_frame(frame, gain, pedestal)\n')
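# In[ ]:

# Quick comparison, not in the original notebook: sizes of the two corrected stacks written
# above, to judge the gzip+shuffle vs bitshuffle/LZ4 trade-off between speed and ratio.
import os
for name in ("lysoembl_32_1153_intensity_cor_gzip.h5",
             "lysoembl_32_1153_intensity_cor_bslz4.h5"):
    path = prefix + name
    if os.path.exists(path):
        print("%s: %.1f MB" % (name, os.path.getsize(path) / 1e6))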
# In[ ]:

get_ipython().run_cell_magic('cl_kernel', '', '\n#define BITS_MANTISSA_FLOAT 23\n\ninline float truncate_precision32(float value,\n                                  uchar prec_bits)\n{\n    union{ float f; uint u;} work;\n    work.f = value;\n    if (prec_bits < BITS_MANTISSA_FLOAT)\n    {\n        int zeroed_bits = BITS_MANTISSA_FLOAT - prec_bits;\n        uint mask = ~((1 << zeroed_bits) - 1);\n        work.u &= mask;\n    }\n    return work.f;\n}\n\ninline float correct_pedestal(global ushort *raw,\n                              global float *gain,\n                              global float *pedestal,\n                              uint size)\n{\n    uint idx = get_global_id(0);\n    float result = 0.0f;\n    if (idx<size)\n    {\n        int value = raw[idx];\n        int g = value>>14;\n        value &= (1<<14)-1;\n        g = (g==3)?2:g;\n        uint read_at = g*size + idx;\n        float gain_value = gain[read_at];\n        if (gain_value!=0.0f)\n            result = (value - pedestal[read_at]) / gain_value;\n    }\n    return result;\n}\n\nkernel void ocl_pedestal_simple(global ushort *raw,\n                                global float *gain,\n                                global float *pedestal,\n                                global float *result,\n                                uint size)\n{\n    uint idx = get_global_id(0);\n    if (idx<size)\n        result[idx] = correct_pedestal(raw, gain, pedestal, size);\n}\n')


# In[ ]:

valid = (corrected_d.get() - minimum) > 0


# In[189]:

fig, ax = subplots()
ax.imshow(valid)


# In[176]:

minimum


# In[177]:

corrected_d.get()


# In[201]:

nb_bragg = []
nsigma = 6
for idx, frame in enumerate(raw):
    frame_d.set(frame)
    ocl_pedestal_cutoff(queue, (frame.size,), (1024,),
                        frame_d.data,
                        gains_d.data,
                        pedestal_d.data,
                        error_d.data,
                        corrected_d.data,
                        numpy.uint32(frame.size),
                        numpy.uint32(nsigma))
    corrected = corrected_d.get()
    res = ai.sigma_clip(corrected, 1024, 512, unit="r_mm", method="csr_nosplit_gpu")
    minimum = ai.calcfrom1d(res[0], res[1]+nsigma*res[2], dim1_unit="r_mm")
    valid = numpy.nansum(corrected_d.get() - minimum)
    nb_bragg.append(valid)
    if idx % 100 == 0:
        print(idx)


# In[202]:

fig, ax = subplots()
ax.plot(nb_bragg)


# In[192]:

valid


# In[200]:

numpy.nansum(corrected_d.get() - minimum)


# In[196]:

corrected_d.get().sum()


# In[198]:


# In[203]:

pyFAI.version


# In[ ]:
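# In[ ]:

# Appendix sketch, not part of the original notebook: a NumPy equivalent of the
# truncate_precision32() OpenCL helper above, zeroing the lowest mantissa bits of
# float32 values so that lossy-but-bounded data compress better.
def truncate_precision32_np(values, prec_bits=10):
    "Keep only `prec_bits` mantissa bits of float32 data (23 bits are available)."
    work = numpy.array(values, dtype=numpy.float32)
    as_int = work.view(numpy.uint32)
    if prec_bits < 23:
        mask = numpy.uint32(~((1 << (23 - prec_bits)) - 1) & 0xFFFFFFFF)
        as_int &= mask
    return work

print(truncate_precision32_np(numpy.array([3.14159265], dtype=numpy.float32), 8))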