VVC中提出的sbTMVP (Subblock-based temporal motion vector prediction)方法类似于HEVC中的TMVP(时域运动矢量预测)技术,sbTMVP使用同位图像(时域邻近的已编码图像)的运动区域来提升当前帧merge模式中的运动矢量预测,sbTMVP和TMVP使用的同位图像是一样的。sbTMVP和TMVP主要在以下两个方面有区别:一是TMVP在CU级预测运动信息,而sbTMVP在子CU(8x8子块)级预测运动信息;二是sbTMVP在从同位图像获取时域运动信息之前,会先根据空域相邻块A1的运动矢量施加一个运动偏移(motion shift),而TMVP直接使用同位块的运动信息。
sbtMVP的预测过程主要有两步:
如图所示:

获取当前CU相邻区域A1这个点的MV,将这个mv作为当前CU的运动偏移MVshift。若A1这个区域的mv无效,则默认运动偏移为(0,0)。
获取运动偏移MVshift
void PU::getAffineMergeCand( const PredictionUnit &pu, AffineMergeCtx& affMrgCtx, const int mrgCandIdx )
{
...
// sbTMVP is usable only when the SPS enables it and the slice is not an IRAP whose list-0 reference is itself
bool enableSubPuMvp = slice.getSPS()->getSBTMVPEnabledFlag() && !(slice.getPOC() == slice.getRefPic(REF_PIC_LIST_0, 0)->getPOC() && slice.isIRAP());
bool isAvailableSubPu = false;
// Derive the sbTMVP MV (it becomes the first candidate of the affine merge list)
if ( enableSubPuMvp && slice.getPicHeader()->getEnableTMVPFlag() )
{
MergeCtx mrgCtx = *affMrgCtx.mrgCtx;
bool tmpLICFlag = false;
CHECK( mrgCtx.subPuMvpMiBuf.area() == 0 || !mrgCtx.subPuMvpMiBuf.buf, "Buffer not initialized" );
mrgCtx.subPuMvpMiBuf.fill( MotionInfo() );
int pos = 0;//counts the available spatial neighbours of the current CU; only position A1 is checked here
// Get spatial MV
const Position posCurLB = pu.Y().bottomLeft();
MotionInfo miLeft;
//left (A1): neighbour directly left of the CU's bottom-left sample
const PredictionUnit* puLeft = cs.getPURestricted( posCurLB.offset( -1, 0 ), pu, pu.chType );
const bool isAvailableA1 = puLeft && isDiffMER(pu.lumaPos(), posCurLB.offset(-1, 0), plevel) && pu.cu != puLeft->cu && CU::isInter( *puLeft->cu );
if ( isAvailableA1 )
{
// A1 is available: its MV will serve as the motion shift for sbTMVP
miLeft = puLeft->getMotionInfo( posCurLB.offset( -1, 0 ) );
// get Inter Dir
mrgCtx.interDirNeighbours[pos] = miLeft.interDir;
// get Mv from Left
mrgCtx.mvFieldNeighbours[pos << 1].setMvField( miLeft.mv[0], miLeft.refIdx[0] );
if ( slice.isInterB() )
{
mrgCtx.mvFieldNeighbours[(pos << 1) + 1].setMvField( miLeft.mv[1], miLeft.refIdx[1] );
}
pos++;
}
mrgCtx.numValidMergeCand = pos;
// Fetch the temporal sub-PU motion field; returns false when the collocated centre block is not inter-coded
isAvailableSubPu = getInterMergeSubPuMvpCand( pu, mrgCtx, tmpLICFlag, pos
, 0
);
if ( isAvailableSubPu )
{
// Use the MVP of the sbTMVP centre position as the first candidate of the affine merge list
// All 3 control points are filled with the same MV
for ( int mvNum = 0; mvNum < 3; mvNum++ )
{
affMrgCtx.mvFieldNeighbours[(affMrgCtx.numValidMergeCand << 1) + 0][mvNum].setMvField( mrgCtx.mvFieldNeighbours[(pos << 1) + 0].mv, mrgCtx.mvFieldNeighbours[(pos << 1) + 0].refIdx );
affMrgCtx.mvFieldNeighbours[(affMrgCtx.numValidMergeCand << 1) + 1][mvNum].setMvField( mrgCtx.mvFieldNeighbours[(pos << 1) + 1].mv, mrgCtx.mvFieldNeighbours[(pos << 1) + 1].refIdx );
}
affMrgCtx.interDirNeighbours[affMrgCtx.numValidMergeCand] = mrgCtx.interDirNeighbours[pos];
affMrgCtx.affineType[affMrgCtx.numValidMergeCand] = AFFINE_MODEL_NUM;
affMrgCtx.mergeType[affMrgCtx.numValidMergeCand] = MRG_TYPE_SUBPU_ATMVP;
// Stop once the requested candidate index has been produced
if ( affMrgCtx.numValidMergeCand == mrgCandIdx )
{
return;
}
affMrgCtx.numValidMergeCand++;
// early termination
if ( affMrgCtx.numValidMergeCand == maxNumAffineMergeCand )
{
return;
}
}
}
...
}
根据运动偏移,得到当前CU中心位置对应的同位块中子CU的运动矢量colMV,再按照公式:
$$mvp=\frac{tp}{td}\cdot colMV$$
其中 $tp$ 表示当前图像与参考图像的时域距离(POC之差),$td$ 表示同位图像与其参考图像的时域距离。
计算得到的mvp作为affine merge候选的第一个候选mv,若当前中心位置对应的同位块子CU不属于帧间预测,则当前CU不能使用sbTMVP模式。
再按照同样的方式,得到当前CU内每个8x8子CU的mvp,若是双向预测帧,则存在两个mvp。

bool PU::getInterMergeSubPuMvpCand(const PredictionUnit &pu, MergeCtx& mrgCtx, bool& LICFlag, const int count
, int mmvdList
)
{
...
const MotionInfo &mi = pColPic->cs->getMotionInfo(centerPos);//motion info in the collocated picture at the (motion-shifted) centre of the current CU
// sbTMVP is available only if the collocated centre block is inter-coded (and not IBC)
if (mi.isInter && mi.isIBCmot == false)
{
mrgCtx.interDirNeighbours[count] = 0;
for (unsigned currRefListId = 0; currRefListId < (bBSlice ? 2 : 1); currRefListId++)
{
RefPicList currRefPicList = RefPicList(currRefListId);
// Scale the collocated MV of the centre position (mvp = tp/td * colMV) to get the CU-centre MV: cColMv
// For bi-prediction two colMVs are derived, one per reference list
if (getColocatedMVP(pu, currRefPicList, centerPos, cColMv, refIdx, true))
{
// set as default, for further motion vector field spanning
mrgCtx.mvFieldNeighbours[(count << 1) + currRefListId].setMvField(cColMv, 0);
mrgCtx.interDirNeighbours[count] |= (1 << currRefListId);
LICFlag = tempLICFlag;
mrgCtx.BcwIdx[count] = BCW_DEFAULT;
found = true;
}
else
{
// No usable collocated MV for this list: mark the field invalid
mrgCtx.mvFieldNeighbours[(count << 1) + currRefListId].setMvField(Mv(), NOT_VALID);
mrgCtx.interDirNeighbours[count] &= ~(1 << currRefListId);
}
}
}
if (!found)
{
return false;
}
if (mmvdList != 1)
{
int xOff = (puWidth >> 1) + tempX;
int yOff = (puHeight >> 1) + tempY;
MotionBuf& mb = mrgCtx.subPuMvpMiBuf;
const bool isBiPred = isBipredRestriction(pu);
// Derive the MVP of every 8x8 subblock of the current CU with the same formula and procedure
for (int y = puPos.y; y < puPos.y + puSize.height; y += puHeight)
{
for (int x = puPos.x; x < puPos.x + puSize.width; x += puWidth)
{
Position colPos{ x + xOff, y + yOff };
clipColPos(colPos.x, colPos.y, pu);
colPos = Position{ PosType(colPos.x & mask), PosType(colPos.y & mask) };
const MotionInfo &colMi = pColPic->cs->getMotionInfo(colPos);
MotionInfo mi;
found = false;
mi.isInter = true;
mi.sliceIdx = slice.getIndependentSliceIdx();
mi.isIBCmot = false;
if (colMi.isInter && colMi.isIBCmot == false)
{
for (unsigned currRefListId = 0; currRefListId < (bBSlice ? 2 : 1); currRefListId++)
{
RefPicList currRefPicList = RefPicList(currRefListId);
if (getColocatedMVP(pu, currRefPicList, colPos, cColMv, refIdx, true))
{
mi.refIdx[currRefListId] = 0;
mi.mv[currRefListId] = cColMv;
found = true;
}
}
}
if (!found)//if the collocated subblock cannot supply sbTMVP motion, fall back to the MVP of the CU centre position
{
mi.mv[0] = mrgCtx.mvFieldNeighbours[(count << 1) + 0].mv;
mi.mv[1] = mrgCtx.mvFieldNeighbours[(count << 1) + 1].mv;
mi.refIdx[0] = mrgCtx.mvFieldNeighbours[(count << 1) + 0].refIdx;
mi.refIdx[1] = mrgCtx.mvFieldNeighbours[(count << 1) + 1].refIdx;
}
mi.interDir = (mi.refIdx[0] != -1 ? 1 : 0) + (mi.refIdx[1] != -1 ? 2 : 0);
// If bi-prediction is restricted for this PU, force uni-prediction from list 0
if (isBiPred && mi.interDir == 3)
{
mi.interDir = 1;
mi.mv[1] = Mv();
mi.refIdx[1] = NOT_VALID;
}
// Store the subblock motion into the sub-PU motion buffer (in 4x4 motion-grid units)
mb.subBuf(g_miScaling.scale(Position{ x, y } -pu.lumaPos()), g_miScaling.scale(Size(puWidth, puHeight))).fill(mi);
}
}
}
return true;
}

// Sub-PU merge modes (e.g. MRG_TYPE_SUBPU_ATMVP from sbTMVP) take the per-subblock motion compensation path
if (pu.mergeType != MRG_TYPE_DEFAULT_N && pu.mergeType != MRG_TYPE_IBC)
{
CHECK(predBufWOBIO != NULL, "the case should not happen!");
xSubPuMC( pu, predBuf, eRefPicList, luma, chroma );
}
void InterPrediction::xSubPuMC( PredictionUnit& pu, PelUnitBuf& predBuf, const RefPicList &eRefPicList /*= REF_PIC_LIST_X*/, const bool luma /*= true*/, const bool chroma /*= true*/)
{
......
for (int fstDim = fstStart; fstDim < fstEnd; fstDim += fstStep)
{
for (int secDim = secStart; secDim < secEnd; secDim += secStep)
{
int x = !verMC ? secDim : fstDim;
int y = !verMC ? fstDim : secDim;
const MotionInfo &curMi = pu.getMotionInfo(Position{ x, y });
// Visit each grid point spaced one subblock (8 samples) apart; first iteration: x=0, y=0
int length = secStep;
int later = secDim + secStep;// = current x plus one subblock step (8), i.e. the next grid point along the scan
// Merge consecutive subblocks along the scan direction while they share identical motion
while (later < secEnd)
{
const MotionInfo &laterMi = !verMC ? pu.getMotionInfo(Position{ later, fstDim }) : pu.getMotionInfo(Position{ fstDim, later });
// e.g. Position(8,0) -- motion info at the point x==8, y==0
// If that point's MV equals the MV at (x, y), extend the run: length += 8,
// then keep testing the next 8x8 unit for the same MV.
// getMotionInfo here reads the per-subblock MVs previously stored by getInterMergeSubPuMvpCand
if (!scaled && laterMi == curMi)
{
length += secStep;
}
else
{
break;
}
later += secStep;
}
int dx = !verMC ? length : puWidth;
int dy = !verMC ? puHeight : length;
// One dimension is the 8-sample subblock size, the other covers the run of 8x8 blocks with identical MVs;
// build a temporary PU over that area and run motion compensation on it as a whole
subPu.UnitArea::operator=(UnitArea(pu.chromaFormat, Area(x, y, dx, dy)));
subPu = curMi;
PelUnitBuf subPredBuf = predBuf.subBuf(UnitAreaRelative(pu, subPu));
subPu.mmvdEncOptMode = 0;
subPu.mvRefine = false;
motionCompensation(subPu, subPredBuf, eRefPicList, luma, chroma);
secDim = later - secStep;// skip the subblocks already covered by this MC call
}
}
m_subPuMC = false;
}