Especificação



Baixar 2,81 Mb.
Página15/24
Encontro01.07.2018
Tamanho2,81 Mb.
1   ...   11   12   13   14   15   16   17   18   ...   24

A6 Classes de codificação 3 – Modos de codificação




      1. A6.1 Interface EncodingMode



package br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode;
import java.io.OutputStream;
import br.ufsc.inf.guiga.media.codec.video.h264.vcl.FrameBuffer;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.Macroblock;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.datatype.MacroblockType;

import br.ufsc.inf.guiga.media.parser.video.YUVFrameBuffer;

import br.ufsc.inf.guiga.media.util.io.H264EntropyOutputStream;
/**
 * Represents one {@link Macroblock} coding mode.
 *
 * @author Guilherme Ferreira
 */
public interface EncodingMode {

    /**
     * Encodes the macroblock using this {@link EncodingMode}.
     *
     * @param inFrameBuffer the {@link FrameBuffer} that contains the macroblock data to
     *            be encoded.
     * @param outFrameBuffer the {@link FrameBuffer} that will hold the coded data.
     */
    public void encode(YUVFrameBuffer inFrameBuffer, YUVFrameBuffer outFrameBuffer);

    /**
     * Makes this encoding mode write its encoded (reconstructed) data into the
     * {@link FrameBuffer}.
     *
     * Every coded macroblock in an H.264 slice is predicted from previously-encoded
     * data. Samples within an intra macroblock are predicted from samples in the
     * current slice that have already been encoded, decoded and reconstructed;
     * samples in an inter macroblock are predicted from previously-encoded data.
     *
     * @param outFrameBuffer the {@link FrameBuffer} that will hold the coded data. This
     *            data will be used by the next macroblocks.
     */
    public void reconstruct(YUVFrameBuffer outFrameBuffer);

    /**
     * Calculates the Sum of Squared Differences (SSD):
     *
     * SSD = Sum(i=0 to M){ Sum(j=0 to N){ (Ori(i,j) - Dec(i,j))^2 } }
     *
     * @return the Sum of Squared Differences of the macroblock.
     */
    public int getDistortion();

    /**
     * Writes the macroblock coded data to the {@link OutputStream}.
     *
     * @param outStream the {@link OutputStream} where the encoded macroblock will be
     *            written.
     */
    public void write(H264EntropyOutputStream outStream);

    /**
     * The type of the macroblock depends on the encoding mode that this object
     * represents.
     *
     * @return the {@link MacroblockType} of this encoding mode.
     */
    public MacroblockType getMbType();
}

      1. A6.2 Classe AbstractEncodingMode



package br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode;
import java.io.OutputStream;
import br.ufsc.inf.guiga.media.codec.video.h264.vcl.FrameBuffer;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.Macroblock;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.datatype.MacroblockType;

import br.ufsc.inf.guiga.media.parser.video.YUVFrameBuffer;

import br.ufsc.inf.guiga.media.util.io.H264EntropyOutputStream;
/**
 * This superclass provides common services to encoding mode subclasses.
 *
 * @author Guilherme Ferreira
 */
public abstract class AbstractEncodingMode implements EncodingMode {

    protected YUVFrameBuffer inputFrameBuffer; // raw YUV input frame
    protected YUVFrameBuffer outputFrameBuffer; // encoded frame
    protected Macroblock macroblock;
    protected MacroblockType mbType;

    /**
     * @param macroblock the {@link Macroblock} to be encoded by this mode.
     * @param mbType subclasses provide this field to identify which encoding mode they
     *            represent.
     */
    public AbstractEncodingMode(Macroblock macroblock, MacroblockType mbType) {
        this.macroblock = macroblock;
        this.mbType = mbType;
    }

    public void encode(YUVFrameBuffer inFrameBuffer, YUVFrameBuffer outFrameBuffer) {
        this.inputFrameBuffer = inFrameBuffer;
        this.outputFrameBuffer = outFrameBuffer;

        doEncode(inFrameBuffer, outFrameBuffer);
    }

    /**
     * Calculates the Sum of Squared Differences (SSD) between the input and the
     * output (reconstructed) frame over this macroblock, covering the luma plane
     * and both chroma planes.
     *
     * @return the Sum of Squared Differences of the macroblock.
     */
    public int getDistortion() {
        int distortion = 0;

        int x = macroblock.getPixelX();
        int y = macroblock.getPixelY();
        int cx = macroblock.getPixelChromaX();
        int cy = macroblock.getPixelChromaY();

        // LUMA. Integer (diff * diff) replaces Math.pow(diff, 2), avoiding a
        // pointless int -> double -> int round trip on every sample; the summed
        // values are identical.
        for (int j = y; j < y + Macroblock.MB_HEIGHT; j++) {
            for (int i = x; i < x + Macroblock.MB_WIDTH; i++) {
                int diff = inputFrameBuffer.getY8bit(i, j) - outputFrameBuffer.getY8bit(i, j);
                distortion += diff * diff;
            }
        }

        // CHROMA (Cb and Cr accumulated together)
        for (int j = cy; j < cy + Macroblock.MB_CHROMA_HEIGHT; j++) {
            for (int i = cx; i < cx + Macroblock.MB_CHROMA_WIDTH; i++) {
                int diffCb = inputFrameBuffer.getCb8bit(i, j) - outputFrameBuffer.getCb8bit(i, j);
                int diffCr = inputFrameBuffer.getCr8bit(i, j) - outputFrameBuffer.getCr8bit(i, j);
                distortion += diffCb * diffCb + diffCr * diffCr;
            }
        }

        return distortion;
    }

    public void write(H264EntropyOutputStream outStream) {
        doWrite(outStream);
    }

    public MacroblockType getMbType() {
        return mbType;
    }

    /**
     * @return the {@link Macroblock} which owns this encoding mode.
     */
    public Macroblock getMacroblock() {
        return macroblock;
    }

    /**
     * Subclasses must implement this method to encode the macroblock data.
     *
     * Important: the coded data shall not be placed on the output buffer until the
     * {@link EncodingMode#reconstruct(YUVFrameBuffer)} method on this mode is called.
     * The output buffer is passed here to provide previously coded data from
     * neighbouring macroblocks.
     *
     * @param inFrameBuffer the {@link FrameBuffer} that contains the macroblock data
     *            to be encoded.
     * @param codedFrameBuffer the {@link FrameBuffer} with previously coded macroblock
     *            data, for prediction use only.
     */
    protected abstract void doEncode(
            YUVFrameBuffer inFrameBuffer, YUVFrameBuffer codedFrameBuffer);

    /**
     * Subclasses must implement this method in order to write their coded data into
     * the stream.
     *
     * @param outStream the {@link OutputStream} where the encoded macroblock will be
     *            written.
     */
    protected abstract void doWrite(H264EntropyOutputStream outStream);
}

      1. A6.3 Classe Intra16x16EncodingMode



package br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode;
import java.util.ArrayList;

import java.util.List;
import br.ufsc.inf.guiga.media.Global;

import br.ufsc.inf.guiga.media.codec.video.h264.BaselineProfileFactory;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.Macroblock;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.algorithm.AlgorithmFactory;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.datatype.MacroblockType;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode.prediction.Intra16x16LumaDCPredictor;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode.prediction.Intra16x16LumaHorizontalPredictor;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode.prediction.Intra16x16LumaPlanePredictor;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode.prediction.Intra16x16LumaVerticalPredictor;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode.prediction.Intra8x8ChromaDCPredictor;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode.prediction.Intra8x8ChromaHorizontalPredictor;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode.prediction.Intra8x8ChromaPlanePredictor;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode.prediction.Intra8x8ChromaVerticalPredictor;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode.prediction.IntraPredictor;

import br.ufsc.inf.guiga.media.parser.video.YUVFrameBuffer;

import br.ufsc.inf.guiga.media.util.io.H264EntropyOutputStream;
/**
 * Basically, this class selects the best luma intra 16x16 prediction mode.
 *
 * @author Guilherme Ferreira
 */
public class Intra16x16EncodingMode extends AbstractEncodingMode {

    protected IntraPredictor[] lumaModes;
    protected IntraPredictor[] chromaModes;
    protected IntraPredictor bestLumaMode;
    protected IntraPredictor bestChromaMode;
    protected int bestLumaModeIdx;
    protected int bestChromaModeIdx;

    // Indices of the modes whose prediction succeeded. Typed List<Integer> (the
    // raw List would not compile against the for (Integer i : ...) loops below).
    protected List<Integer> successPredLumaModes;
    protected List<Integer> successPredChromaModes;

    // coded_block_pattern: indicates which blocks within a macroblock contain
    // non-zero transform coefficient levels
    int codedBlockPattern;

    public Intra16x16EncodingMode(Macroblock macroblock) {
        super(macroblock, MacroblockType.I16MB);

        codedBlockPattern = 0;

        int x = macroblock.getPixelX();
        int y = macroblock.getPixelY();
        int xc = macroblock.getPixelChromaX();
        int yc = macroblock.getPixelChromaY();
        int qp = Global.getInstance().getH264Control().getQuantizerParameter();

        AlgorithmFactory algorithms = new BaselineProfileFactory(qp);

        // creates the four luma intra 16x16 prediction modes
        lumaModes = new IntraPredictor[4];
        lumaModes[0] = new Intra16x16LumaVerticalPredictor(x, y, macroblock, algorithms);
        lumaModes[1] = new Intra16x16LumaHorizontalPredictor(x, y, macroblock, algorithms);
        lumaModes[2] = new Intra16x16LumaDCPredictor(x, y, macroblock, algorithms);
        lumaModes[3] = new Intra16x16LumaPlanePredictor(x, y, macroblock, algorithms);

        // creates the four chroma intra 8x8 prediction modes
        // FIXME: Each Intra Encoding Mode (i.e. 4x4, 8x8) computes the chroma
        // prediction, which is a waste of time. Although, JM14 does chroma coding
        // for each intra mode too (I16MB and I4MB).
        chromaModes = new IntraPredictor[4];
        chromaModes[0] = new Intra8x8ChromaDCPredictor(xc, yc, macroblock, algorithms);
        chromaModes[1] = new Intra8x8ChromaHorizontalPredictor(xc, yc, macroblock,
                algorithms);
        chromaModes[2] = new Intra8x8ChromaVerticalPredictor(xc, yc, macroblock,
                algorithms);
        chromaModes[3] = new Intra8x8ChromaPlanePredictor(xc, yc, macroblock, algorithms);

        // these lists avoid computing distortion on unsuccessful encoding modes
        successPredLumaModes = new ArrayList<Integer>();
        successPredChromaModes = new ArrayList<Integer>();
    }

    public void reconstruct(YUVFrameBuffer outFrameBuffer) {
        // Reconstruct the pixels from the transformed mode. Since the next
        // macroblocks use these reconstructed samples for prediction, we ensure
        // that decoder and encoder use the same samples, avoiding error
        // propagation.
        bestLumaMode.reconstruct(outFrameBuffer);
        bestChromaMode.reconstruct(outFrameBuffer);
    }

    protected void doEncode(YUVFrameBuffer inFrameBuffer, YUVFrameBuffer codedFrameBuffer) {
        // generate the luma intra prediction samples for all four 16x16 modes
        for (int i = 0; i < lumaModes.length; i++) {
            if (lumaModes[i].predict(inFrameBuffer, codedFrameBuffer)) {
                successPredLumaModes.add(i);
            }
        }

        // get the best (lowest-distortion) luma intra 16x16 mode
        int bestSAD = Integer.MAX_VALUE;
        int currentSAD = 0;

        for (Integer i : successPredLumaModes) {
            currentSAD = lumaModes[i].getDistortion();
            if (currentSAD < bestSAD) {
                bestSAD = currentSAD;
                bestLumaMode = lumaModes[i];
                bestLumaModeIdx = i;
            }
        }

        // encode the chosen intra luma mode
        codedBlockPattern += bestLumaMode.encode(inFrameBuffer, codedFrameBuffer);

        // generate the chroma intra prediction samples for all four 8x8 modes
        for (int i = 0; i < chromaModes.length; i++) {
            if (chromaModes[i].predict(inFrameBuffer, codedFrameBuffer)) {
                successPredChromaModes.add(i);
            }
        }

        // get the best (lowest-distortion) chroma intra 8x8 mode
        bestSAD = Integer.MAX_VALUE;
        currentSAD = 0;

        for (Integer i : successPredChromaModes) {
            currentSAD = chromaModes[i].getDistortion();
            if (currentSAD < bestSAD) {
                bestSAD = currentSAD;
                bestChromaMode = chromaModes[i];
                bestChromaModeIdx = i;
            }
        }

        // encode the chosen intra chroma mode
        codedBlockPattern += bestChromaMode.encode(inFrameBuffer, codedFrameBuffer);

        releaseUnusedModes();
    }

    protected void doWrite(H264EntropyOutputStream outStream) {
        // mb_type: built from the chosen luma mode and the coded block pattern
        int cbpL = codedBlockPattern % 16;
        int cbpC = codedBlockPattern / 16;
        int mb_type = 1 + bestLumaModeIdx + 4 * cbpC + (((cbpL != 0) ? 1 : 0) * 12);
        outStream.writeMacroblockType(mb_type);

        // intra_chroma_pred_mode
        int intra_chroma_pred_mode = bestChromaModeIdx;
        outStream.writeIntraChromaPredMode(intra_chroma_pred_mode);

        // mb_qp_delta
        int mb_qp_delta = 0; // TODO this.delta_qp = this.qp - prev.qp;
        outStream.writeMackoblockQpDelta(mb_qp_delta);

        // residual_luma()
        // -> residual_block_cavlc( i16x16DClevel, 0, 15, 16 )
        // -> residual_block_cavlc( i16x16AClevel[i8x8*4+ i4x4], ...)
        bestLumaMode.write(outStream, codedBlockPattern);

        // residual_block_cavlc( ChromaDCLevel[ iCbCr ], ... )
        // residual_block_cavlc( ChromaACLevel[ iCbCr ][ i8x8*4+i4x4 ], ...)
        bestChromaMode.write(outStream, codedBlockPattern);
    }

    /**
     * Unreferences the candidate mode arrays so the garbage collector can reclaim
     * them. Only the modes still referenced by bestLumaMode / bestChromaMode remain
     * reachable. (Nulling each element first is unnecessary: dropping the array
     * reference leaves the same objects unreachable.)
     */
    private void releaseUnusedModes() {
        lumaModes = null;
        chromaModes = null;
    }
}

      1. A6.4 Classe IPCMEncodingMode



package br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode;
import br.ufsc.inf.guiga.media.codec.video.h264.vcl.Macroblock;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.datatype.MacroblockType;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.datatype.SliceType;

import br.ufsc.inf.guiga.media.parser.video.YUVFrameBuffer;

import br.ufsc.inf.guiga.media.util.io.H264EntropyOutputStream;
/**
 * I_PCM is an Intra coding mode that enables an encoder to transmit the values of the
 * image samples directly (without prediction or transformation). In some special cases
 * (e.g. anomalous image content and/or very low quantizer parameters), this mode may be
 * more efficient than the 'usual' process of intra prediction, transformation,
 * quantization and entropy coding.
 *
 * The I_PCM option also makes it possible to place an absolute limit on the number of
 * bits that may be contained in a coded macroblock without constraining decoded image
 * quality.
 *
 * @author Guilherme Ferreira
 */
public class IPCMEncodingMode extends AbstractEncodingMode {

    // raw sample buffers captured in doEncode, indexed as [x][y]
    int[][] bufferY;
    int[][] bufferU;
    int[][] bufferV;

    public IPCMEncodingMode(Macroblock macroblock) {
        super(macroblock, MacroblockType.IPCM);

        bufferY = new int[Macroblock.MB_WIDTH][Macroblock.MB_HEIGHT];
        bufferU = new int[Macroblock.MB_CHROMA_WIDTH][Macroblock.MB_CHROMA_HEIGHT];
        bufferV = new int[Macroblock.MB_CHROMA_WIDTH][Macroblock.MB_CHROMA_HEIGHT];
    }

    public void reconstruct(YUVFrameBuffer outFrameBuffer) {
        int x = macroblock.getPixelX();
        int y = macroblock.getPixelY();
        int cx = macroblock.getPixelChromaX();
        int cy = macroblock.getPixelChromaY();

        // write the I_PCM luma pixels back, unmodified
        for (int j = 0; j < Macroblock.MB_HEIGHT; j++) {
            for (int i = 0; i < Macroblock.MB_WIDTH; i++) {
                outFrameBuffer.setY8bit(i + x, j + y, bufferY[i][j]);
            }
        }

        // write the I_PCM chroma pixels back (all Cb, then all Cr)
        for (int j = 0; j < Macroblock.MB_CHROMA_HEIGHT; j++) {
            for (int i = 0; i < Macroblock.MB_CHROMA_WIDTH; i++) {
                outFrameBuffer.setCb8bit(i + cx, j + cy, bufferU[i][j]);
            }
        }

        for (int j = 0; j < Macroblock.MB_CHROMA_HEIGHT; j++) {
            for (int i = 0; i < Macroblock.MB_CHROMA_WIDTH; i++) {
                outFrameBuffer.setCr8bit(i + cx, j + cy, bufferV[i][j]);
            }
        }
    }

    protected void doEncode(YUVFrameBuffer inFrameBuffer, YUVFrameBuffer codedFrameBuffer) {
        int x = macroblock.getPixelX();
        int y = macroblock.getPixelY();
        int cx = macroblock.getPixelChromaX();
        int cy = macroblock.getPixelChromaY();

        // LUMA: capture samples from the inFrameBuffer parameter. (The previous
        // code read the inherited inputFrameBuffer field, which only worked
        // because the superclass assigns it immediately before calling doEncode;
        // both refer to the same buffer.)
        for (int j = 0; j < Macroblock.MB_HEIGHT; j++) {
            for (int i = 0; i < Macroblock.MB_WIDTH; i++) {
                bufferY[i][j] = inFrameBuffer.getY8bit(i + x, j + y);
            }
        }

        // CHROMA
        for (int j = 0; j < Macroblock.MB_CHROMA_HEIGHT; j++) {
            for (int i = 0; i < Macroblock.MB_CHROMA_WIDTH; i++) {
                bufferU[i][j] = inFrameBuffer.getCb8bit(i + cx, j + cy);
                bufferV[i][j] = inFrameBuffer.getCr8bit(i + cx, j + cy);
            }
        }
    }

    protected void doWrite(H264EntropyOutputStream outStream) {
        int bitDepth = 8; // TODO get it automatically

        // mb_type: 25 (I_PCM) in an I slice, 31 otherwise
        // NOTE(review): the H.264 P-slice intra mb_type offset is usually 5
        // (i.e. 30 for I_PCM) — confirm the 31 here against the spec.
        if (macroblock.getSlice().getSliceType().equals(SliceType.I_SLICE)) {
            outStream.writeMacroblockType(25);
        } else {
            outStream.writeMacroblockType(31);
        }

        // pcm_alignment_zero_bit (align the byte)
        outStream.flush();

        // write the I_PCM luma pixels
        for (int j = 0; j < Macroblock.MB_HEIGHT; j++) {
            for (int i = 0; i < Macroblock.MB_WIDTH; i++) {
                outStream.write_u_v(bitDepth, bufferY[i][j]);
            }
        }

        // write the I_PCM chroma pixels (all Cb, then all Cr)
        for (int j = 0; j < Macroblock.MB_CHROMA_HEIGHT; j++) {
            for (int i = 0; i < Macroblock.MB_CHROMA_WIDTH; i++) {
                outStream.write_u_v(bitDepth, bufferU[i][j]);
            }
        }

        for (int j = 0; j < Macroblock.MB_CHROMA_HEIGHT; j++) {
            for (int i = 0; i < Macroblock.MB_CHROMA_WIDTH; i++) {
                outStream.write_u_v(bitDepth, bufferV[i][j]);
            }
        }
    }
}

      1. A6.5 Classe Intra16x16LumaAbstractPredictor



package br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode.prediction;
import java.util.Arrays;
import br.ufsc.inf.guiga.media.Global;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.FrameBuffer;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.Macroblock;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.algorithm.AlgorithmFactory;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.algorithm.Quantizer;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.algorithm.Scanner;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.algorithm.Transform;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.algorithm.Quantizer.QuantizerSummary;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.block.ResidualBlockInfo;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.datatype.ResidualBlockType;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.macroblock.MacroblockAccess;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.macroblock.MacroblockInfo;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode.decision.DistortionMetric;

import br.ufsc.inf.guiga.media.parser.video.YUVFrameBuffer;

import br.ufsc.inf.guiga.media.util.SupportMath;

import br.ufsc.inf.guiga.media.util.io.H264EntropyOutputStream;
/**
 * This class provides common services for Intra 16x16 Luma Prediction Modes.
 *
 * @author Guilherme Ferreira
 */
public abstract class Intra16x16LumaAbstractPredictor implements IntraPredictor {

    protected YUVFrameBuffer inputFrameBuffer; // raw YUV frame
    protected YUVFrameBuffer outputFrameBuffer; // encoded frame

    protected int x; // macroblock upper left corner x0
    protected int y; // macroblock upper left corner y0

    // macroblock dimensions
    protected int mbWidth = Macroblock.MB_WIDTH;
    protected int mbHeight = Macroblock.MB_HEIGHT;

    // number of DC coefficients (4x4)
    protected int dcWidth = 4;
    protected int dcHeight = 4;

    protected int mOrig[][]; // macroblock original samples (16x16 samples)
    protected int mResd[][]; // macroblock transformed residual samples (16x16 samples)
    protected int mPred[][]; // macroblock predicted samples (16x16 samples)

    // Hadamard (4x4 DC coefficients)
    protected int mDc[][]; // transformed and quantized coefficients

    protected MacroblockAccess access;
    protected MacroblockInfo info;
    protected Transform transform;
    protected Quantizer quantizer;
    protected DistortionMetric distortion;
    protected Scanner scanner;

    protected static final int bitDepthY;
    protected static final int maxImagePelValue;

    static {
        bitDepthY = Global.getInstance().getH264Control().getBitDepthLuma();
        maxImagePelValue = (1 << bitDepthY) - 1; // (2 ^ bitDepth) - 1
    }

    /**
     * @param x the macroblock upper left corner horizontal position.
     * @param y the macroblock upper left corner vertical position.
     * @param macroblock the {@link Macroblock} that allows access to the macroblock
     *            neighbours.
     * @param algorithms the {@link AlgorithmFactory} for algorithms creation.
     */
    public Intra16x16LumaAbstractPredictor(
            int x,
            int y,
            Macroblock macroblock,
            AlgorithmFactory algorithms)
    {
        this.x = x;
        this.y = y;
        this.access = macroblock.getMacroblockAccess();
        this.info = macroblock.getMacroblockInfo();

        transform = algorithms.createTransform();
        quantizer = algorithms.createQuantizer();
        distortion = algorithms.createDistortionMetric();
        scanner = algorithms.createScanner();

        mOrig = new int[mbHeight][mbWidth];
        mResd = new int[mbHeight][mbWidth];
        mPred = new int[mbHeight][mbWidth];
        mDc = new int[dcHeight][dcWidth];
    }

    public boolean predict(
            YUVFrameBuffer origiFrameBuffer, YUVFrameBuffer codedFrameBuffer)
    {
        // place the input frame data into an easy-access matrix
        fillOriginalMatrix(origiFrameBuffer);

        // predict the samples
        return doIntraPrediction(codedFrameBuffer, mPred);
    }

    public int encode(YUVFrameBuffer inFrameBuffer, YUVFrameBuffer outFrameBuffer) {
        this.inputFrameBuffer = inFrameBuffer;
        this.outputFrameBuffer = outFrameBuffer;

        // fill the macroblock matrix with the residual samples, original minus
        // predicted
        fillResidualMatrix();

        // apply transform and quantization on the residual samples and return
        // the amount of non-zero coefficients
        return forwardTransform(mResd, mDc, mResd);
    }

    public void reconstruct(YUVFrameBuffer outFrameBuffer) {
        // In intra mode a prediction block P is formed based on previously
        // encoded and reconstructed blocks.
        int dq_bits = 6;
        int residualRecons; // reconstructed residual sample
        int originalRecons; // reconstructed original sample
        int predicted; // predicted sample

        // apply the inverse quantization and transform on residual coefficients
        int[][] mrInv = new int[mbHeight][mbWidth];
        inverseTransform(mDc, mResd, mrInv);

        for (int j = 0; j < mbHeight; j++) {
            int jj = y + j;

            for (int i = 0; i < mbWidth; i++) {
                int ii = x + i;

                predicted = mPred[j][i];
                residualRecons = SupportMath.rshiftRound(mrInv[j][i], dq_bits);
                originalRecons = residualRecons + predicted;
                originalRecons = SupportMath.clip(maxImagePelValue, originalRecons);
                outFrameBuffer.setY8bit(ii, jj, originalRecons);
            }
        }
    }

    public void write(H264EntropyOutputStream outStream, int codedBlockPattern) {
        int maxNumDcCoeff = 16;
        int maxNumAcCoeff = 15; // the first level of each AC block is the DC
        int[] coeffLevel = new int[maxNumDcCoeff];
        int[] coeffRun = new int[maxNumDcCoeff];

        // get the information about this DC block
        ResidualBlockInfo blockInfo = info.getBlockInfo(0, 0,
                ResidualBlockType.Intra16x16LumaDCLevel);

        // DC coefficients: reorder the matrix
        scanner.reorder4x4(mDc, coeffLevel, coeffRun, 0, maxNumDcCoeff, 0, 0);

        // DC coefficients: entropy code
        outStream.writeResidualBlock(coeffLevel, coeffRun,
                ResidualBlockType.Intra16x16LumaDCLevel, blockInfo);

        // AC coefficients: only present when the luma bits of the coded block
        // pattern are non-zero
        if ((codedBlockPattern & 15) != 0) {
            // write the four AC 4x4 sub-blocks of each 8x8 block
            for (int i8x8 = 0; i8x8 < 4; i8x8++) {
                for (int i4x4 = 0; i4x4 < 4; i4x4++) {
                    int block_y = 4 * (2 * (i8x8 >> 1) + (i4x4 >> 1));
                    int block_x = 4 * (2 * (i8x8 & 0x01) + (i4x4 & 0x01));

                    // clean up the destination vectors
                    Arrays.fill(coeffLevel, 0);
                    Arrays.fill(coeffRun, 0);

                    // reorder the matrix
                    scanner.reorder4x4(mResd, coeffLevel, coeffRun, 1, maxNumAcCoeff,
                            block_y, block_x);

                    // get the information about this AC block
                    blockInfo = info.getBlockInfo(block_x >> 2, block_y >> 2,
                            ResidualBlockType.Intra16x16LumaACLevel);

                    // entropy code
                    outStream.writeResidualBlock(coeffLevel, coeffRun,
                            ResidualBlockType.Intra16x16LumaACLevel, blockInfo);
                }
            }
        }
    }

    public int getDistortion() {
        return distortion.getDistortion16x16(mOrig, mPred);
    }

    /**
     * Subclasses must implement this method in order to predict samples.
     *
     * @param codedFrameBuffer the {@link FrameBuffer} containing previously coded
     *            samples necessary for prediction.
     * @param mp the matrix where the predicted samples must be placed.
     * @return true if the prediction was successfully made, or false if there was
     *         anything that prevented the prediction from completing.
     */
    protected abstract boolean doIntraPrediction(
            YUVFrameBuffer codedFrameBuffer, int[][] mp);

    /**
     * Fills the matrix with the original frame samples.
     *
     * @param origiFrameBuffer the buffer holding the raw input frame.
     */
    private void fillOriginalMatrix(YUVFrameBuffer origiFrameBuffer) {
        for (int j = 0; j < mbHeight; j++) {
            int jj = y + j;

            for (int i = 0; i < mbWidth; i++) {
                int ii = x + i;
                mOrig[j][i] = origiFrameBuffer.getY8bit(ii, jj);
            }
        }
    }

    /**
     * Fills the matrix with the predicted residual samples (original minus predicted).
     */
    private void fillResidualMatrix() {
        for (int j = 0; j < mbHeight; j++) {
            for (int i = 0; i < mbWidth; i++) {
                mResd[j][i] = mOrig[j][i] - mPred[j][i];
            }
        }
    }

    /**
     * Applies a 16x16 luma transform and quantization on the residual samples.
     *
     * @param mrSrc residual coefficients.
     * @param m4Dst transformed Hadamard coefficients.
     * @param mrDst transformed residual coefficients.
     * @return the amount of non-zero coefficients.
     */
    private int forwardTransform(int[][] mrSrc, int[][] m4Dst, int[][] mrDst) {
        int acCoeff = 0;

        // do a forward transform on each 4x4 residual block of this macroblock
        for (int block_y = 0; block_y < mbHeight; block_y += 4) {
            for (int block_x = 0; block_x < mbWidth; block_x += 4) {
                transform.forward4x4(mrSrc, mrDst, block_y, block_x);
            }
        }

        // build a DC matrix with the DC coefficients from each 4x4 block
        for (int j = 0; j < dcHeight; j++)
            for (int i = 0; i < dcWidth; i++)
                m4Dst[j][i] = mrDst[j << 2][i << 2];

        // the DC coefficient of each 4x4 block is transformed again using a 4x4
        // Hadamard transform
        transform.hadamard4x4(m4Dst, m4Dst);

        // quantize the DC matrix to produce a block of quantized DC coefficients
        QuantizerSummary qs = quantizer.quantization4x4DC(m4Dst, m4Dst);

        // fulfill the info of this residual DC block
        ResidualBlockInfo blockInfo = new ResidualBlockInfo(qs.nonZeroCoeff);
        info.setBlockInfo(0, 0, ResidualBlockType.Intra16x16LumaDCLevel, blockInfo);

        // Quantize AC coefficients. The horizontal bound is mbWidth (the original
        // code used mbHeight here; identical for the square 16x16 macroblock, but
        // mbWidth is the correct bound for the x axis).
        for (int block_y = 0; block_y < mbHeight; block_y += 4) {
            for (int block_x = 0; block_x < mbWidth; block_x += 4) {
                qs = quantizer.quantization4x4AC(mrDst, mrDst, block_y, block_x);

                // fulfill the info of this residual AC block
                blockInfo = new ResidualBlockInfo(qs.nonZeroCoeff);
                info.setBlockInfo(block_x >> 2, block_y >> 2,
                        ResidualBlockType.Intra16x16LumaACLevel, blockInfo);

                if (qs.nonZeroCoeff > 0) {
                    acCoeff = 15;
                }
            }
        }

        return acCoeff;
    }

    /**
     * Applies a 16x16 luma inverse transform and quantization on the residual samples.
     *
     * @param m4Src transformed Hadamard coefficients.
     * @param mrSrc transformed residual coefficients.
     * @param mrDst inverse transformed residual coefficients.
     */
    private void inverseTransform(int[][] m4Src, int[][] mrSrc, int[][] mrDst) {
        int[][] m4Inv = new int[dcHeight][dcWidth];

        // copy the source into the destination matrix
        for (int j = 0; j < mbHeight; j++)
            for (int i = 0; i < mbWidth; i++)
                mrDst[j][i] = mrSrc[j][i];

        // apply an inverse Hadamard transform on the quantized DC coefficients
        transform.ihadamard4x4(m4Src, m4Inv);

        // restore DC coefficients through inverse quantization
        quantizer.iquantization4x4DC(m4Inv, m4Inv);

        // restore DC coefficients into the transformed matrix
        for (int j = 0; j < dcHeight; j++)
            for (int i = 0; i < dcWidth; i++)
                mrDst[j << 2][i << 2] = m4Inv[j][i];

        // apply inverse quantization and transform on AC coefficients
        for (int block_y = 0; block_y < mbHeight; block_y += 4) {
            for (int block_x = 0; block_x < mbWidth; block_x += 4) {
                quantizer.iquantization4x4AC(mrDst, mrDst, block_y, block_x);
                transform.inverse4x4(mrDst, mrDst, block_y, block_x);
            }
        }
    }
}

      1. A6.6 Classe Intra8x8ChromaAbstractPredictor



package br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode.prediction;
import java.util.Arrays;
import br.ufsc.inf.guiga.media.Global;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.FrameBuffer;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.Macroblock;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.algorithm.AlgorithmFactory;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.algorithm.Quantizer;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.algorithm.Scanner;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.algorithm.Transform;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.algorithm.Quantizer.QuantizerSummary;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.block.ResidualBlockInfo;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.datatype.ResidualBlockType;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.macroblock.MacroblockAccess;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.macroblock.MacroblockInfo;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode.decision.DistortionMetric;

import br.ufsc.inf.guiga.media.parser.video.YUVFrameBuffer;

import br.ufsc.inf.guiga.media.util.SupportMath;

import br.ufsc.inf.guiga.media.util.io.H264EntropyOutputStream;
/**

* This class provides common services for Intra 8x8 Chroma Prediction Modes.

*

* Both chroma blocks (Cb and Cr) of the macroblock use the same prediction mode. The

* prediction mode is applied to each of the chroma blocks separately.

*

* @author Guilherme Ferreira



*/

public abstract class Intra8x8ChromaAbstractPredictor implements IntraPredictor {

private enum ChromaType {

CB, CR

};
protected YUVFrameBuffer inputFrameBuffer; // raw YUV frame



protected YUVFrameBuffer outputFrameBuffer; // encoded frame
protected int x; // macroblock left up corner x0

protected int y; // macroblock left up corner y0
// macroblock original samples (8x8 samples) for U and V

protected int mOrigCb[][];

protected int mOrigCr[][];

// macroblock residual coded samples (8x8 samples) for U and V



protected int mResdCb[][];

protected int mResdCr[][];

// macroblock predicted samples (8x8 samples) for U and V



protected int mPredCb[][];

protected int mPredCr[][];

// coefficients for Hadamard (2x2 DC coefficients) for U and V



protected int mDcCb[][];

protected int mDcCr[][];
protected MacroblockAccess access;

protected MacroblockInfo info;

protected Transform transform;

protected Quantizer quantizer;

protected DistortionMetric distortion;

protected Scanner scanner;
// MbWidthC and MbHeightC specify the width and height, respectively, of the chroma

// arrays

// for each macroblock

protected int mbWidthC = 8;

protected int mbHeightC = 8;

// 2x2 DC coefficients for chroma blocks



protected int dcWidthC = 2;

protected int dcHeightC = 2;
protected static final int bitDepthC;

protected static final int maxImagePelValue;
static {

bitDepthC = Global.getInstance().getH264Control().getBitDepthChroma();

maxImagePelValue = (1 << bitDepthC) - 1; // 2 ^ bitDepth

}
// TODO find a more appropriate place to this constant

// threshold for chroma coefficients

protected static final int CHROMA_COEFF_COST = 4;
/**

* @param x the macroblock upper left corner horizontal position for chroma component.

* @param y the macroblock upper left corner vertical position for chroma component.

* @param access the {@link Macroblock} that allow access to the macroblock

* neighbours.

* @param algorithms the {@link AlgorithmFactory} for algorithms creation.

*/

public Intra8x8ChromaAbstractPredictor(

int x,

int y,

Macroblock macroblock,

AlgorithmFactory algorithms)

{

this.x = x;



this.y = y;

this.access = macroblock.getMacroblockAccess();

this.info = macroblock.getMacroblockInfo();
transform = algorithms.createTransform();

quantizer = algorithms.createQuantizer();

distortion = algorithms.createDistortionMetric();

scanner = algorithms.createScanner();


mOrigCb = new int[mbHeightC][mbWidthC];

mOrigCr = new int[mbHeightC][mbWidthC];

mResdCb = new int[mbHeightC][mbWidthC];

mResdCr = new int[mbHeightC][mbWidthC];

mPredCb = new int[mbHeightC][mbWidthC];

mPredCr = new int[mbHeightC][mbWidthC];

mDcCb = new int[dcHeightC][dcWidthC];

mDcCr = new int[dcHeightC][dcWidthC];

}
public boolean predict(

YUVFrameBuffer origiFrameBuffer, YUVFrameBuffer codedFrameBuffer)

{

// place the input frame data into a easy access matrix.



fillOriginalMatrix(origiFrameBuffer);
// Predict the samples.

return doIntraPrediction(codedFrameBuffer, mPredCb, mPredCr);

}
/**
 * Transforms and quantizes the chroma residual of this macroblock.
 *
 * @param inFrameBuffer unused here; original samples were captured by predict().
 * @param outFrameBuffer unused here; reconstruction happens in reconstruct().
 * @return the chroma contribution to the coded block pattern, already shifted
 *         into bit positions 4..5.
 */
public int encode(YUVFrameBuffer inFrameBuffer, YUVFrameBuffer outFrameBuffer) {
    // Build the residual (original minus predicted) for both chroma planes.
    fillResidualMatrix();

    // Transform and quantize each plane; each call reports how many non-zero
    // coefficient categories (DC and/or AC) the plane produced.
    int nzCb = forwardTransform(mResdCb, mDcCb, mResdCb, ChromaType.CB);
    int nzCr = forwardTransform(mResdCr, mDcCr, mResdCr, ChromaType.CR);

    // The worst (largest) of the two planes decides the chroma CBP bits.
    int nzChroma = (nzCb > nzCr) ? nzCb : nzCr;
    return nzChroma << 4;
}
/**
 * Rebuilds the reconstructed chroma samples of this macroblock and stores them
 * into the output frame, so following macroblocks can predict from them.
 *
 * @param outFrameBuffer the frame that receives the reconstructed samples.
 */
public void reconstruct(YUVFrameBuffer outFrameBuffer) {
    // De-quantization scaling applied after the inverse transform.
    final int dqBits = 6;

    // Undo quantization and transform on the residual coefficients of each plane.
    int[][] invCb = new int[mbHeightC][mbWidthC];
    int[][] invCr = new int[mbHeightC][mbWidthC];
    inverseTransform(mDcCb, mResdCb, invCb);
    inverseTransform(mDcCr, mResdCr, invCr);

    // Each sample is prediction + reconstructed residual, clipped to the valid
    // pel range, written back at the macroblock's frame position (x, y).
    for (int row = 0; row < mbHeightC; row++) {
        final int frameY = y + row;
        for (int col = 0; col < mbWidthC; col++) {
            final int frameX = x + col;

            int cb = SupportMath.rshiftRound(invCb[row][col], dqBits) + mPredCb[row][col];
            cb = SupportMath.clip(maxImagePelValue, cb);
            outFrameBuffer.setCb8bit(frameX, frameY, cb);

            int cr = SupportMath.rshiftRound(invCr[row][col], dqBits) + mPredCr[row][col];
            cr = SupportMath.clip(maxImagePelValue, cr);
            outFrameBuffer.setCr8bit(frameX, frameY, cr);
        }
    }
}
/**
 * Writes the coded chroma data of this macroblock to the entropy output stream.
 * <p>
 * The DC blocks (one 2x2 per plane) are written when any chroma bit of the
 * coded block pattern is set; the AC blocks (four 4x4 per plane) are written
 * only when the chroma CBP bits equal 10b.
 *
 * @param outStream the entropy coded output stream.
 * @param codedBlockPattern the macroblock coded block pattern; chroma occupies
 *            bits 4..5.
 */
public void write(H264EntropyOutputStream outStream, int codedBlockPattern) {
    int maxNumDcCoeff = 4;
    int maxNumAcCoeff = 15; // the first level of each AC block is the DC

    // Check if any chroma bit in the coded block pattern is set.
    if (codedBlockPattern > 15) {
        int[] coeffLevelCb = new int[maxNumAcCoeff];
        int[] coeffLevelCr = new int[maxNumAcCoeff];
        int[] coeffRunCb = new int[maxNumAcCoeff];
        int[] coeffRunCr = new int[maxNumAcCoeff];

        // Get the information about the DC blocks.
        // Note: the chroma DC block has its own VLC table, which doesn't
        // require prediction from other blocks.
        ResidualBlockInfo blockInfoCb = info.getBlockInfo(0, 0,
                ResidualBlockType.CbIntra8x8ChromaDCLevel);
        ResidualBlockInfo blockInfoCr = info.getBlockInfo(0, 0,
                ResidualBlockType.CrIntra8x8ChromaDCLevel);

        // DC coefficients: reorder the 2x2 matrices, then entropy code them.
        scanner.reorder2x2(mDcCb, coeffLevelCb, coeffRunCb, 0, maxNumDcCoeff, 0, 0);
        scanner.reorder2x2(mDcCr, coeffLevelCr, coeffRunCr, 0, maxNumDcCoeff, 0, 0);
        outStream.writeResidualBlock(coeffLevelCb, coeffRunCb,
                ResidualBlockType.CbIntra8x8ChromaDCLevel, blockInfoCb);
        outStream.writeResidualBlock(coeffLevelCr, coeffRunCr,
                ResidualBlockType.CrIntra8x8ChromaDCLevel, blockInfoCr);
    }

    // AC coefficients: check if the chroma bits in the coded block pattern = 10b.
    if (codedBlockPattern >> 4 == 2) {
        writeChromaAcBlocks(outStream, mResdCb,
                ResidualBlockType.CbIntra8x8ChromaACLevel, maxNumAcCoeff);
        writeChromaAcBlocks(outStream, mResdCr,
                ResidualBlockType.CrIntra8x8ChromaACLevel, maxNumAcCoeff);
    }
}

/**
 * Reorders and entropy codes the four 4x4 AC blocks of one chroma plane.
 * Extracted to remove the duplicated Cb/Cr loops that previously lived in
 * {@code write}.
 *
 * @param outStream the entropy coded output stream.
 * @param mResd the quantized residual coefficients of the plane.
 * @param acType the {@link ResidualBlockType} tagging this plane's AC blocks.
 * @param maxNumAcCoeff number of AC levels per block (15; index 0 is the DC,
 *            coded separately).
 */
private void writeChromaAcBlocks(
    H264EntropyOutputStream outStream, int[][] mResd,
    ResidualBlockType acType, int maxNumAcCoeff)
{
    int[] coeffLevel = new int[maxNumAcCoeff];
    int[] coeffRun = new int[maxNumAcCoeff];

    for (int block_y = 0; block_y < mbHeightC; block_y += 4) {
        for (int block_x = 0; block_x < mbWidthC; block_x += 4) {
            // Clean up the destination vectors.
            Arrays.fill(coeffLevel, 0);
            Arrays.fill(coeffRun, 0);

            // AC coefficients: reorder the matrix, starting at coefficient 1
            // because index 0 (the DC level) is coded in its own 2x2 block.
            scanner.reorder4x4(mResd, coeffLevel, coeffRun, 1,
                    maxNumAcCoeff, block_y, block_x);

            // Get the information about this AC block and entropy code it.
            ResidualBlockInfo blockInfo = info.getBlockInfo(block_x >> 2, block_y >> 2,
                    acType);
            outStream.writeResidualBlock(coeffLevel, coeffRun, acType, blockInfo);
        }
    }
}
/**
 * Calculates the distortion of the chroma prediction: the sum, over every 4x4
 * block of both planes, of the metric between original and predicted samples.
 *
 * @return the accumulated distortion of the macroblock's chroma planes.
 */
public int getDistortion() {
    int total = 0;

    // Visit each 4x4 block once, adding the Cb then the Cr distortion.
    for (int blockY = 0; blockY < mbHeightC; blockY += 4) {
        for (int blockX = 0; blockX < mbWidthC; blockX += 4) {
            total += distortion.getDistortion4x4(mOrigCb, mPredCb, blockY, blockX);
            total += distortion.getDistortion4x4(mOrigCr, mPredCr, blockY, blockX);
        }
    }

    // TODO why cost += (int) (enc_mb.lambda_me[Q_PEL] * mvbits[ mode ])?
    // why does it need to exp golomb coding cost for mode signaling?
    return total;
}
/**
 * Subclasses must implement this method in order to predict samples.
 *
 * @param codedFrameBuffer the {@link FrameBuffer} containing previously coded
 *            samples necessary for prediction.
 * @param mpCb the matrix where the predicted Cb (U) samples must be placed.
 * @param mpCr the matrix where the predicted Cr (V) samples must be placed.
 * @return true if the prediction was successfully performed, or false if
 *         anything prevented the prediction from completing.
 */
protected abstract boolean doIntraPrediction(
    YUVFrameBuffer codedFrameBuffer, int[][] mpCb, int[][] mpCr);

/**
 * Copies the original chroma samples of this macroblock out of the frame and
 * into the local Cb/Cr matrices.
 *
 * @param origiFrameBuffer the frame holding the original (uncoded) samples.
 */
private void fillOriginalMatrix(YUVFrameBuffer origiFrameBuffer) {
    for (int row = 0; row < mbHeightC; row++) {
        final int frameY = y + row;
        for (int col = 0; col < mbWidthC; col++) {
            final int frameX = x + col;
            mOrigCb[row][col] = origiFrameBuffer.getCb8bit(frameX, frameY);
            mOrigCr[row][col] = origiFrameBuffer.getCr8bit(frameX, frameY);
        }
    }
}
/**
 * Computes the residual samples — original minus predicted — for both chroma
 * planes of this macroblock.
 */
private void fillResidualMatrix() {
    for (int row = 0; row < mbHeightC; row++) {
        for (int col = 0; col < mbWidthC; col++) {
            mResdCb[row][col] = mOrigCb[row][col] - mPredCb[row][col];
            mResdCr[row][col] = mOrigCr[row][col] - mPredCr[row][col];
        }
    }
}
/**
 * Applies the 8x8 4:2:0 chroma forward transform and quantization on the
 * residual samples of one chroma plane.
 * <p>
 * Each of the four 4x4 blocks is transformed; their four DC coefficients are
 * then gathered into a 2x2 matrix, Hadamard transformed and quantized
 * separately, and the AC coefficients are quantized per 4x4 block. Per-block
 * non-zero counts are recorded in {@code info} for later entropy coding.
 *
 * @param mrSrc residual coefficients. May be the same array as {@code mrDst}
 *            (the transform is then in place — this is how encode() calls it).
 * @param m2Dst output 2x2 matrix of Hadamard-transformed, quantized DC
 *            coefficients.
 * @param mrDst output transformed and quantized residual coefficients.
 * @param chromaType the {@link ChromaType} (Cb or Cr) of this block.
 * @return the amount of non-zero coefficient categories: 0 = none,
 *         1 = DC only, 2 = DC and AC.
 */
private int forwardTransform(
    int[][] mrSrc, int[][] m2Dst, int[][] mrDst, ChromaType chromaType)
{
    // Residual block types used to tag this plane's coding information.
    ResidualBlockType typeDC, typeAC;

    if (chromaType == ChromaType.CB) {
        typeDC = ResidualBlockType.CbIntra8x8ChromaDCLevel;
        typeAC = ResidualBlockType.CbIntra8x8ChromaACLevel;
    } else {
        typeDC = ResidualBlockType.CrIntra8x8ChromaDCLevel;
        typeAC = ResidualBlockType.CrIntra8x8ChromaACLevel;
    }

    long coeffCost = 0;     // accumulated "cost" of AC coefficients (thresholding)
    int nonZeroCoeff = 0;   // non-zero AC count of the last block that had any
    int dcCoeff = 0;        // 1 when any quantized DC coefficient is non-zero
    int acCoeff = 0;        // 1 when any quantized AC coefficient is non-zero

    // Do a forward transform on each 4x4 residual block of this macroblock.
    for (int block_y = 0; block_y < mbHeightC; block_y += 4) {
        for (int block_x = 0; block_x < mbWidthC; block_x += 4) {
            transform.forward4x4(mrSrc, mrDst, block_y, block_x);
        }
    }

    // Build a 2x2 DC matrix with the DC coefficient (top-left sample) of each
    // 4x4 block.
    for (int j = 0; j < dcHeightC; j++) {
        for (int i = 0; i < dcWidthC; i++) {
            m2Dst[j][i] = mrDst[j << 2][i << 2];
        }
    }

    // The DC coefficient of each block is transformed again using a 2x2
    // Hadamard transform.
    transform.hadamard2x2(m2Dst, m2Dst);

    // Quantize the DC matrix to produce a block of quantized DC coefficients.
    QuantizerSummary qs = quantizer.quantization2x2DC(m2Dst, m2Dst);

    if (qs.nonZeroCoeff > 0) {
        dcCoeff = 1;
    }

    // Record the information of the DC residual block.
    ResidualBlockInfo blockInfo = new ResidualBlockInfo(qs.nonZeroCoeff);
    info.setBlockInfo(0, 0, typeDC, blockInfo);

    // Quantize the AC coefficients of each 4x4 block.
    for (int block_y = 0; block_y < mbHeightC; block_y += 4) {
        for (int block_x = 0; block_x < mbWidthC; block_x += 4) {
            qs = quantizer.quantization4x4AC(mrDst, mrDst, block_y, block_x);
            coeffCost += qs.coeffCost;
            if (qs.nonZeroCoeff > 0) {
                nonZeroCoeff = qs.nonZeroCoeff;
                acCoeff = 1;
            }
            // Record the information of this AC residual block.
            blockInfo = new ResidualBlockInfo(qs.nonZeroCoeff);
            info.setBlockInfo(block_x >> 2, block_y >> 2, typeAC, blockInfo);
        }
    }

    // Perform thresholding: when there are non-zero AC coefficients but their
    // total cost stays below CHROMA_COEFF_COST, they are considered too cheap
    // to be worth coding, so all chroma AC coefficients are reset to zero.
    if ((nonZeroCoeff > 0) && (coeffCost < CHROMA_COEFF_COST)) {
        for (int block_y = 0; block_y < mbHeightC; block_y += 4) {
            for (int block_x = 0; block_x < mbWidthC; block_x += 4) {
                for (int jj = block_y; jj < (block_y + 4); jj++)
                    for (int ii = block_x; ii < (block_x + 4); ii++)
                        mrDst[jj][ii] = 0;
            }
        }
        acCoeff = 0;
    }

    // If AC coefficients are enabled, DC coefficients must be set too.
    if (acCoeff > 0)
        dcCoeff = 1;

    return (dcCoeff + acCoeff);
}
/**
 * Applies the 8x8 4:2:0 chroma inverse transform and quantization on the
 * residual samples of one chroma plane, mirroring {@code forwardTransform}.
 *
 * @param m2Src 2x2 matrix of Hadamard-transformed, quantized DC coefficients.
 * @param mrSrc transformed and quantized residual coefficients.
 * @param mrDst output: inverse transformed residual coefficients (still scaled;
 *            the caller applies the final right shift — see reconstruct()).
 */
private void inverseTransform(int[][] m2Src, int[][] mrSrc, int[][] mrDst) {
    int[][] m2Inv = new int[dcHeightC][dcWidthC];

    // Copy the source into the destination matrix so the remaining steps can
    // work in place on mrDst.
    for (int j = 0; j < mbHeightC; j++)
        for (int i = 0; i < mbWidthC; i++)
            mrDst[j][i] = mrSrc[j][i];

    // Apply an inverse Hadamard transform on the quantized DC coefficients.
    transform.ihadamard2x2(m2Src, m2Inv);

    // Restore DC coefficients through inverse quantization.
    quantizer.iquantization2x2DC(m2Inv, m2Inv);

    // Scatter the restored DC coefficients back to the top-left sample of each
    // 4x4 block in the transformed matrix.
    for (int j = 0; j < dcHeightC; j++)
        for (int i = 0; i < dcWidthC; i++)
            mrDst[j << 2][i << 2] = m2Inv[j][i];

    // Apply inverse quantization and transform on the AC coefficients of each
    // 4x4 block.
    for (int block_y = 0; block_y < mbHeightC; block_y += 4) {
        for (int block_x = 0; block_x < mbWidthC; block_x += 4) {
            quantizer.iquantization4x4AC(mrDst, mrDst, block_y, block_x);
            transform.inverse4x4(mrDst, mrDst, block_y, block_x);
        }
    }
}
}

A6.7 Classe Intra16x16LumaDCPredictor



package br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode.prediction;
import java.awt.Point;
import br.ufsc.inf.guiga.media.codec.video.h264.vcl.Macroblock;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.algorithm.AlgorithmFactory;

import br.ufsc.inf.guiga.media.parser.video.YUVFrameBuffer;

import br.ufsc.inf.guiga.media.util.SupportMath;
/**
 * Intra 16x16 Luma Prediction Mode 2 (DC): every sample of the macroblock is
 * predicted with the mean of the upper and left-hand neighbouring samples
 * (H + V).
 *
 * @author Guilherme Ferreira
 */
public class Intra16x16LumaDCPredictor extends Intra16x16LumaAbstractPredictor {

    /**
     * Creates a luma DC predictor bound to one macroblock position.
     *
     * @param x the macroblock upper left corner horizontal position.
     * @param y the macroblock upper left corner vertical position.
     * @param macroblock the {@link Macroblock} granting neighbour access.
     * @param algorithms the {@link AlgorithmFactory} for algorithm creation.
     */
    public Intra16x16LumaDCPredictor(
        int x,
        int y,
        Macroblock macroblock,
        AlgorithmFactory algorithms)
    {
        super(x, y, macroblock, algorithms);
    }

    protected boolean doIntraPrediction(
        YUVFrameBuffer codedFrameBuffer, int[][] mp)
    {
        Point p = new Point();

        int maxW = Macroblock.MB_WIDTH;
        int maxH = Macroblock.MB_HEIGHT;
        boolean upAvail = access.isUpAvailable(maxW);
        boolean leftAvail = access.isLeftAvailable(maxH);

        // H = sum(x' = 0 to 15) { p[x', -1] }
        int sumUp = 0;
        if (upAvail) {
            for (int col = 0; col < Macroblock.MB_WIDTH; col++) {
                access.getNeighbour(col, -1, maxW, maxH, p);
                sumUp += codedFrameBuffer.getY8bit(p.x, p.y);
            }
        }

        // V = sum(y' = 0 to 15) { p[-1, y'] }
        int sumLeft = 0;
        if (leftAvail) {
            for (int row = 0; row < Macroblock.MB_HEIGHT; row++) {
                access.getNeighbour(-1, row, maxW, maxH, p);
                sumLeft += codedFrameBuffer.getY8bit(p.x, p.y);
            }
        }

        // The DC value depends on which neighbouring edges are available.
        final int predL;
        if (upAvail && leftAvail) {
            // No edge: predL = (H + V + 16) >> 5
            predL = SupportMath.rshiftRound(sumUp + sumLeft, 5);
        } else if (leftAvail) {
            // Upper edge: predL = (V + 8) >> 4
            predL = SupportMath.rshiftRound(sumLeft, 4);
        } else if (upAvail) {
            // Left edge: predL = (H + 8) >> 4
            predL = SupportMath.rshiftRound(sumUp, 4);
        } else {
            // Top left corner: predL = 1 << (BitDepthY - 1)
            predL = 1 << (bitDepthY - 1);
        }

        // Every predicted sample receives the same DC value.
        for (int[] row : mp) {
            java.util.Arrays.fill(row, predL);
        }
        return true;
    }
}

A6.8 Classe Intra8x8ChromaDCPredictor



package br.ufsc.inf.guiga.media.codec.video.h264.vcl.mode.prediction;
import java.awt.Point;
import br.ufsc.inf.guiga.media.codec.video.h264.vcl.Macroblock;

import br.ufsc.inf.guiga.media.codec.video.h264.vcl.algorithm.AlgorithmFactory;

import br.ufsc.inf.guiga.media.parser.video.YUVFrameBuffer;
/**
 * Intra 8x8 Chroma Prediction Mode 0 (DC): each 4x4 sub-block of the 8x8 chroma
 * macroblock is predicted with the mean of its available neighbouring samples.
 *
 * @author Guilherme Ferreira
 */
public class Intra8x8ChromaDCPredictor extends Intra8x8ChromaAbstractPredictor {

    // Dimensions of each 4x4 chroma sub-block.
    private static final int CHROMA_BLK_WIDTH = 4;
    private static final int CHROMA_BLK_HEIGHT = 4;
    // Number of 4x4 sub-blocks per row (and per column) of the 8x8 block.
    private static final int CHROMA_BLK_QUANTITY = 2;

    /**
     * Creates a chroma DC predictor bound to one macroblock position.
     *
     * @param x the macroblock upper left corner horizontal position (chroma).
     * @param y the macroblock upper left corner vertical position (chroma).
     * @param macroblock the {@link Macroblock} granting neighbour access.
     * @param algorithms the {@link AlgorithmFactory} for algorithm creation.
     */
    public Intra8x8ChromaDCPredictor(
        int x,
        int y,
        Macroblock macroblock,
        AlgorithmFactory algorithms)
    {
        super(x, y, macroblock, algorithms);
    }

    protected boolean doIntraPrediction(
        YUVFrameBuffer codedFrameBuffer, int[][] mpCb, int[][] mpCr)
    {
        Point p = new Point();

        int maxW = Macroblock.MB_CHROMA_WIDTH;
        int maxH = Macroblock.MB_CHROMA_HEIGHT;
        boolean upAvail = access.isUpAvailable(maxW);
        boolean leftAvail = access.isLeftAvailable(maxH);

        // For each chroma block of 4x4 samples indexed by
        // chroma4x4BlkIdx = 0..( 1 << ( ChromaArrayType + 1 ) ) - 1
        // (four sub-blocks: top-left, top-right, bottom-left, bottom-right).
        for (int chroma4x4BlkIdx = 0; chroma4x4BlkIdx < 4; chroma4x4BlkIdx++) {
            int predCb = 0;
            int predCr = 0;
            int sumUpCb = 0;
            int sumUpCr = 0;
            int sumLeftCb = 0;
            int sumLeftCr = 0;

            // (xO, yO): upper-left corner of this sub-block within the 8x8 block.
            int xO = posX(chroma4x4BlkIdx);
            int yO = posY(chroma4x4BlkIdx);

            // sum(x' = 0 to 3) { p[x' + xO, -1] }
            if (upAvail) {
                for (int x = 0; x < CHROMA_BLK_WIDTH; x++) {
                    access.getNeighbour(x, -1, maxW, maxH, p);
                    sumUpCb += codedFrameBuffer.getCb8bit(p.x + xO, p.y);
                    sumUpCr += codedFrameBuffer.getCr8bit(p.x + xO, p.y);
                }
            }

            // sum(y' = 0 to 3) { p[-1, y' + yO] }
            if (leftAvail) {
                for (int y = 0; y < CHROMA_BLK_HEIGHT; y++) {
                    access.getNeighbour(-1, y, maxW, maxH, p);
                    sumLeftCb += codedFrameBuffer.getCb8bit(p.x, p.y + yO);
                    sumLeftCr += codedFrameBuffer.getCr8bit(p.x, p.y + yO);
                }
            }

            // TOP-LEFT and BOTTOM-RIGHT sub-blocks: both neighbouring edges are
            // equally relevant, so average whichever of them are available.
            if (((xO == 0) && (yO == 0)) || ((xO > 0) && (yO > 0))) {
                if (upAvail && leftAvail) {
                    // predC[x+xO, y+yO] = (sumUp + sumLeft + 4) >> 3
                    predCb = (sumUpCb + sumLeftCb + 4) >> 3;
                    predCr = (sumUpCr + sumLeftCr + 4) >> 3;
                } else if (leftAvail) {
                    // predC[x+xO, y+yO] = (sumLeft + 2) >> 2
                    predCb = (sumLeftCb + 2) >> 2;
                    predCr = (sumLeftCr + 2) >> 2;
                } else if (upAvail) {
                    // predC[x+xO, y+yO] = (sumUp + 2) >> 2
                    predCb = (sumUpCb + 2) >> 2;
                    predCr = (sumUpCr + 2) >> 2;
                } else {
                    // No neighbours: mid-grey, predC = (1 << ( BitDepthC - 1 ))
                    predCb = 1 << (bitDepthC - 1);
                    predCr = 1 << (bitDepthC - 1);
                }
            } // TOP-RIGHT sub-block: prefers the upper neighbours.
            else if ((xO > 0) && (yO == 0)) {
                if (upAvail) {
                    // predC[x+xO, y+yO] = (sumUp + 2) >> 2
                    predCb = (sumUpCb + 2) >> 2;
                    predCr = (sumUpCr + 2) >> 2;
                } else if (leftAvail) {
                    // predC[x+xO, y+yO] = (sumLeft + 2) >> 2
                    predCb = (sumLeftCb + 2) >> 2;
                    predCr = (sumLeftCr + 2) >> 2;
                } else {
                    // No neighbours: mid-grey, predC = (1 << ( BitDepthC - 1 ))
                    predCb = 1 << (bitDepthC - 1);
                    predCr = 1 << (bitDepthC - 1);
                }
            } // BOTTOM-LEFT sub-block: prefers the left neighbours.
            else if ((xO == 0) && (yO > 0)) {
                if (leftAvail) {
                    // predC[x+xO, y+yO] = (sumLeft + 2) >> 2
                    predCb = (sumLeftCb + 2) >> 2;
                    predCr = (sumLeftCr + 2) >> 2;
                } else if (upAvail) {
                    // predC[x+xO, y+yO] = (sumUp + 2) >> 2
                    predCb = (sumUpCb + 2) >> 2;
                    predCr = (sumUpCr + 2) >> 2;
                } else {
                    // No neighbours: mid-grey, predC = (1 << ( BitDepthC - 1 ))
                    predCb = 1 << (bitDepthC - 1);
                    predCr = 1 << (bitDepthC - 1);
                }
            }

            // Store the DC prediction: every sample of the sub-block receives
            // the same value.
            for (int j = yO; j < CHROMA_BLK_HEIGHT + yO; j++) {
                for (int i = xO; i < CHROMA_BLK_WIDTH + xO; i++) {
                    mpCb[j][i] = predCb;
                    mpCr[j][i] = predCr;
                }
            }
        }
        return true;
    }

    // Horizontal offset of the sub-block inside the 8x8 chroma block.
    private int posX(int blkIdx) {
        return (blkIdx % CHROMA_BLK_QUANTITY) * CHROMA_BLK_WIDTH;
    }

    // Vertical offset of the sub-block inside the 8x8 chroma block.
    private int posY(int blkIdx) {
        return (blkIdx / CHROMA_BLK_QUANTITY) * CHROMA_BLK_HEIGHT;
    }
}





1   ...   11   12   13   14   15   16   17   18   ...   24


©livred.info 2017
enviar mensagem

    Página principal