Example RGB to YUV conversion (simplified, no clamping)
Note: not verified for correct conversion, concept study only.
Makes use of the VectorSig data type and the simple @pipeline decorator.
!pip install numpy > /dev/null
Import video types:
from video.color import *
from video.videotypes import *
Import pipeline and target auxiliaries:
from myirl.library.pipeline import *
from myirl import targets
Construct the conversion matrix, in this case for JPEG-compliant YCrCb:
CLAMP = False
LEVELSHIFT = False
BPP = 8
FRACT_SIZE = 16
CALCSIZE = FRACT_SIZE + BPP
SATURATION_VALUE_MAX = 127 # YUV maximum value (saturation)
SATURATION_VALUE_MIN = -128 # YUV minimum value (saturation)
# Signed matrix entries:
Y_FROM_RGB = vector_to_fp(FRACT_SIZE, 1, mat_jpeg_rgb2yuv[0])
U_FROM_RGB = vector_to_fp(FRACT_SIZE, 1, mat_jpeg_rgb2yuv[1])
V_FROM_RGB = vector_to_fp(FRACT_SIZE, 1, mat_jpeg_rgb2yuv[2])
def F(x, s = FRACT_SIZE):
    return intbv(x)[s:]
YUV_SLICE = slice(CALCSIZE-1, CALCSIZE-1 - BPP)
MATRIX = [
    [ F(Y_FROM_RGB[i]) for i in range(3) ],
    [ F(U_FROM_RGB[i]) for i in range(3) ],
    [ F(V_FROM_RGB[i]) for i in range(3) ]
]
from myirl.vector import VectorSignal
# Helper: generator over the raw values of a 3-element coefficient row (used as initializer)
I = lambda x: ( x[i]._val for i in range(3) )
# @bulkwrapper()
# class RGBParam:
#     def __init__(self):
#         self.y = VectorSig(3, MATRIX[0], initializer = I(MATRIX[0]))
#         self.u = VectorSig(3, MATRIX[1], initializer = I(MATRIX[1]))
#         self.v = VectorSig(3, MATRIX[2], initializer = I(MATRIX[2]))
MATRIX
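As a rough cross-check of the fixed-point encoding, the quantization can be sketched in plain Python. This is an assumption about how vector_to_fp encodes the coefficients (two's complement, one integer/sign bit, FRACT_SIZE-1 fractional bits) and it assumes mat_jpeg_rgb2yuv is a plain 3x3 nested sequence of floats; the to_fixed helper below is hypothetical and not part of the library:

# Hypothetical model of the coefficient quantization (assumption, not library code):
def to_fixed(x, size = FRACT_SIZE):
    # two's complement word: 1 integer/sign bit, size-1 fractional bits
    return int(round(float(x) * (1 << (size - 1)))) & ((1 << size) - 1)

[[hex(to_fixed(c)) for c in row] for row in mat_jpeg_rgb2yuv]

If the assumption holds, the result should be comparable, up to rounding, with the Y_FROM_RGB, U_FROM_RGB and V_FROM_RGB entries above.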
from myirl import simulation as sim
from myirl.test.common_test import gen_osc
@block
def video_rgb_yuv(clk : ClkSignal,
                  vin : VideoPort,
                  rgb : Signal,
                  param_matrix : list,
                  vout : VideoPort.Output,
                  yuv : Signal.Output):
    """RGB to full range YUV422 converter, manual pipeline inference"""

    py, pu, pv = [
        VectorSignal(3, F(0), initializer = I(param_matrix[i]), name = "p_coef%d" % i)
        for i in range(3)
    ]

    # Use initializers:
    py._init = True
    pu._init = True
    pv._init = True

    valid = Signal(bool())

    rgb_v = VectorSignal(3, FractUnsigned(0, BPP), name = 'rgbv')
    a = VectorSignal(3, FractSigned(0, CALCSIZE+2), name = "add_res")
    y = VectorSignal(3, FractUnsigned(0, CALCSIZE), name = "ydata")
    u, v = [ VectorSignal(3, FractSigned(0, CALCSIZE+1), name = n) for n in ['udata', 'vdata'] ]

    # Wire up input RGB video:
    wires = []
    for i in range(3):
        j = 3 - i
        wires.append(rgb_v[i].wireup(rgb[j*BPP:(j-1)*BPP]))

    # Predefine YUV slices
    yuv_slices = (a[i][YUV_SLICE] for i in range(3))

    wires += [
        yuv.wireup(
            concat(*yuv_slices)
        )
    ]

    @pipeline(clk, None, ce = vin.dval, pass_in = vin, pass_out = vout)
    def yuv_pipe(ctx):
        """This contains the two-stage transformation for the RGB-YUV matrix.
        Because it's a vector signal, we can use HDL notation (<=)."""
        yield [
            y <= (py * rgb_v),
            u <= (pu.signed() * rgb_v),
            v <= (pv.signed() * rgb_v)
        ]

        # Create sum expressions for readability:
        _y, _u, _v = (i.sum() for i in [y, u, v])

        yield [
            a[0].set(_y.signed()),
            a[1].set(_u),
            a[2].set(_v)
        ]

    return locals()
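To illustrate what the two pipeline stages compute, here is a bit-true software sketch of the datapath only: stage one forms the three fixed-point products per component, stage two sums them, and the output byte is taken from the BPP bits below bit CALCSIZE-1 of the sum, which is what YUV_SLICE selects. This is not library code; it reuses the assumptions from above (15 fractional coefficient bits, plain truncation, mat_jpeg_rgb2yuv as a nested float sequence) and ignores pipeline latency and valid handling:

def model_rgb_to_yuv(r, g, b, matrix = mat_jpeg_rgb2yuv):
    """Hypothetical software model of the two-stage datapath (truncating, no clamping)."""
    out = []
    for row in matrix:
        # Stage 1: per-channel fixed point products (signed coefficient * 8 bit pixel)
        prods = [int(round(float(c) * (1 << (FRACT_SIZE - 1)))) * px
                 for c, px in zip(row, (r, g, b))]
        # Stage 2: sum the products, then take the BPP bits below bit CALCSIZE-1
        # (the equivalent of the YUV_SLICE selection above)
        acc = sum(prods)
        out.append((acc >> (CALCSIZE - 1 - BPP)) & ((1 << BPP) - 1))
    return out

# e.g. a saturated cyan pixel:
["%02x" % c for c in model_rgb_to_yuv(0x00, 0xff, 0xff)]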
from myirl.targets import VHDL
from myirl.test.common_test import run_ghdl
d = DesignModule("top", debug = True)
@component(d)
def testbench_rgb2yuv():
    clk = ClkSignal(name = "pclk")
    yuv = Signal(intbv(0)[3*BPP:])

    vint, vout = [VideoPort() for _ in range(2)]

    yuv = Signal(intbv(0)[3*BPP:], name = 'yuv_data')
    rgb = Signal(intbv(0)[3*BPP:], name = 'rgb_data')

    inst = video_rgb_yuv(clk = clk,
                         vin = vint,
                         rgb = rgb,
                         param_matrix = MATRIX,
                         vout = vout,
                         yuv = yuv
                         )

    osc = gen_osc(clk, CYCLE = 5)

    @sim.generator
    def stimulus():
        # Feed a few color values:
        values = sim.Iterator([0x00ffff, 0x7f7f7f, 0x008300, 0x1a840a])

        yield [
            vint.dval.set(False), vint.fval.set(True), vint.lval.set(True),
            sim.wait(4 * [ clk.posedge, ] ),
            vint.dval.set(True),
            sim.For(values)(
                sim.wait('1 ns'),
                rgb.set(values),
                sim.wait(2 * [clk.posedge]),
                sim.print_(yuv),
            ),
            sim.wait(3 * [ clk.posedge, ] ),
            sim.assert_(vout.dval == True, "Video not valid"),
        ]

        for _ in range(3):
            yield [
                sim.print_(yuv),
                sim.wait(clk.posedge),
            ]

        yield [
            sim.raise_(sim.StopSimulation)
        ]

    return locals()
def test():
    tb = testbench_rgb2yuv()
    files = tb.elab(VHDL, elab_all = True)
    run_ghdl(files, tb, debug = True, vcdfile="yuv.vcd")
    return files, tb
files, tb = test()
import wavedraw; import nbwavedrom
TB = tb.name
waveform = wavedraw.vcd2wave("yuv.vcd", TB + '.pclk', None)
nbwavedrom.draw(waveform)
Download VCD trace yuv.vcd
!cat -n {files[0]}
Using numpy, we can run our samples through the floating point matrix as well:
import numpy
v = numpy.matrix(mat_jpeg_rgb2yuv)
rgb = numpy.matrix([ (127, 127, 127), (0, 255, 255), (0, 0x83, 0)]).T
yuv = v * rgb
g = lambda x: "%02x" % (int(x) & 0xff)
f = numpy.vectorize(g)
f(yuv.T)
We note that the results don't entirely match the values printed by the simulation. Why?
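One way to narrow the difference down is to run the same samples through the truncating bit-true sketch from above (the hypothetical model_rgb_to_yuv, based on the stated assumptions) and compare against both outputs; with hedging, plausible sources of discrepancy are coefficient quantization, truncation versus rounding, and how negative values wrap in the 8-bit hex formatting:

for r, g, b in [(0x7f, 0x7f, 0x7f), (0x00, 0xff, 0xff), (0x00, 0x83, 0x00)]:
    print("".join("%02x" % c for c in model_rgb_to_yuv(r, g, b)))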