接上篇,最終通過ByteBufferWebpDecoder執(zhí)行decode來對(duì)獲取的圖片數(shù)據(jù)進(jìn)行解碼。
項(xiàng)目出自:https://github.com/zjupure/GlideWebpDecoder
ByteBufferWebpDecoder.java
/**
 * Decodes the WebP bytes in {@code source} into an animated {@link WebpDrawable} resource.
 *
 * The buffer's remaining bytes are copied into a byte[], parsed into a native-backed
 * WebpImage, wrapped in a WebpDecoder, and the first frame is rendered eagerly so a
 * broken image can be rejected up front.
 *
 * @param source  raw WebP bytes (read from current position to limit)
 * @param width   target width, used only to compute the downsample factor
 * @param height  target height, used only to compute the downsample factor
 * @param options Glide decode options
 * @return the drawable resource, or null if the first frame cannot be rendered
 * @throws IOException declared by the Glide ResourceDecoder contract
 */
@Override
public Resource<WebpDrawable> decode(@NonNull ByteBuffer source, int width, int height, @NonNull Options options) throws IOException {
// Copy the buffer's remaining bytes into a plain array for the native parser.
int length = source.remaining();
byte[] data = new byte[length];
source.get(data, 0, length);
//1 Parse the bytes into a native-backed WebpImage (see WebpImage.create below).
WebpImage webp = WebpImage.create(data);
int sampleSize = Utils.getSampleSize(webp.getWidth(), webp.getHeight(), width, height);
//2 Build the frame decoder; sampleSize downscales each rendered frame.
WebpDecoder webpDecoder = new WebpDecoder(mProvider, webp, source, sampleSize);
webpDecoder.advance();
//3 Render the first frame eagerly; null means the image is unusable.
Bitmap firstFrame = webpDecoder.getNextFrame();
if (firstFrame == null) {
return null;
}
Transformation<Bitmap> unitTransformation = UnitTransformation.get();
//4 Wrap everything in a Glide resource; UnitTransformation is the identity transform.
return new WebpDrawableResource(new WebpDrawable(mContext, webpDecoder, mBitmapPool, unitTransformation, width, height,
firstFrame));
}
1.WebpImage.create(data);
WebpImage.java
/**
 * Builds a WebpImage from raw WebP bytes.
 *
 * The bytes are copied into a direct ByteBuffer because the native side reads the
 * data through GetDirectBufferAddress; the native decoder then copies them again
 * into memory it manages itself.
 *
 * @param source the encoded WebP bytes; must not be null
 * @return the native-backed WebpImage
 */
public static WebpImage create(byte[] source) {
Preconditions.checkNotNull(source);
final ByteBuffer direct = ByteBuffer.allocateDirect(source.length);
direct.put(source);
direct.rewind();
return nativeCreateFromDirectByteBuffer(direct);
}
這里native代碼基于google開源項(xiàng)目 https://github.com/webmproject/libwebp
webp.cpp
/**
 * Creates a new WebPImage from the specified byte buffer. The data from the byte buffer is copied
 * into native memory managed by WebPImage.
 *
 * @param byteBuffer A java.nio.ByteBuffer. Must be direct. Assumes data is the entire capacity
 *                   of the buffer
 * @return a newly allocated WebPImage, or 0 (null) on failure
 */
jobject WebPImage_nativeCreateFromDirectByteBuffer(JNIEnv* pEnv, jclass clazz, jobject byteBuffer) {
  // GetDirectBufferAddress returns null when the buffer is not direct.
  jbyte* bbufInput = (jbyte*) pEnv->GetDirectBufferAddress(byteBuffer);
  if (!bbufInput) {
    throwIllegalArgumentException(pEnv, "ByteBuffer must be direct");
    return 0;
  }
  jlong capacity = pEnv->GetDirectBufferCapacity(byteBuffer);
  if (pEnv->ExceptionCheck()) {
    return 0;
  }
  // Copy the entire capacity into a native vector; ownership of the bytes is
  // then handed to the WebPImage built by the helper below.
  std::vector<uint8_t> vBuffer(bbufInput, bbufInput + capacity);
  return WebPImage_nativeCreateFromByteVector(pEnv, vBuffer);
}
// FIX(review): the closing brace above was missing in the original excerpt,
// which left this function unterminated and the file unparseable.
/**
 * Creates a new WebPImage from the specified buffer.
 *
 * @param vBuffer the vector containing the bytes; on success its contents are
 *        moved into the native WebPDemuxerWrapper (the caller's vector is left
 *        in a moved-from state)
 * @return a newly allocated WebPImage, or 0 (null) on failure
 */
jobject WebPImage_nativeCreateFromByteVector(JNIEnv* pEnv, std::vector<uint8_t>& vBuffer) {
std::unique_ptr<WebPImage> spNativeWebpImage(new WebPImage());
// NOTE(review): plain `new` throws std::bad_alloc instead of returning null,
// so this check is likely dead code — kept as-is.
if (!spNativeWebpImage) {
throwOutOfMemoryError(pEnv, "Unable to allocate native context");
return 0;
}
// WebPData is on the stack as its only used during the call to WebPDemux.
// It does NOT copy: webPData.bytes aliases vBuffer's storage, so vBuffer must
// stay alive and un-reallocated while the demuxer is used — hence the
// ownership transfer to WebPDemuxerWrapper further below.
WebPData webPData;
webPData.bytes = vBuffer.data();
webPData.size = vBuffer.size();
// Create the WebPDemuxer; WebPDemuxDelete is attached as the deleter so the
// demuxer is released automatically on every early-return path.
auto spDemuxer = std::unique_ptr<WebPDemuxer, decltype(&WebPDemuxDelete)> {
WebPDemux(&webPData),
WebPDemuxDelete
};
if (!spDemuxer) {
// We may want to consider first using functions that will return a useful error code
// if it fails to parse.
throwIllegalArgumentException(pEnv, "Failed to create demuxer");
//FBLOGW("unable to get demuxer");
return 0;
}
// Cache the container-level features queried from the demuxer.
spNativeWebpImage->pixelWidth = WebPDemuxGetI(spDemuxer.get(), WEBP_FF_CANVAS_WIDTH);
spNativeWebpImage->pixelHeight = WebPDemuxGetI(spDemuxer.get(), WEBP_FF_CANVAS_HEIGHT);
spNativeWebpImage->numFrames = WebPDemuxGetI(spDemuxer.get(), WEBP_FF_FRAME_COUNT);
spNativeWebpImage->loopCount = WebPDemuxGetI(spDemuxer.get(), WEBP_FF_LOOP_COUNT);
spNativeWebpImage->backgroundColor = WebPDemuxGetI(spDemuxer.get(), WEBP_FF_BACKGROUND_COLOR);
// Compute cached fields that require iterating the frames.
jint durationMs = 0;
std::vector<jint> frameDurationsMs;
WebPIterator iter;
// Frame numbers are 1-based in the demux API; iterate every frame to sum the
// total duration and record each frame's duration.
if (WebPDemuxGetFrame(spDemuxer.get(), 1, &iter)) {
do {
durationMs += iter.duration;
frameDurationsMs.push_back(iter.duration);
} while (WebPDemuxNextFrame(&iter));
WebPDemuxReleaseIterator(&iter);
}
spNativeWebpImage->durationMs = durationMs;
spNativeWebpImage->frameDurationsMs = frameDurationsMs;
// NOTE(review): NewIntArray/SetIntArrayRegion results are unchecked, and this
// assumes frameDurationsMs holds exactly numFrames entries — presumably true
// since the iterator visits every frame, but worth confirming.
jintArray frameDurationsArr = pEnv->NewIntArray(spNativeWebpImage->numFrames);
pEnv->SetIntArrayRegion(frameDurationsArr, 0, spNativeWebpImage->numFrames, spNativeWebpImage->frameDurationsMs.data());
// Ownership of pDemuxer and vBuffer is transferred to WebPDemuxerWrapper here.
// Note, according to Rob Arnold, createNew assumes we throw exceptions but we don't. Though
// he claims this won't happen in practice cause "Linux will overcommit pages, we should only
// get this error if we run out of virtual address space." Also, Daniel C may be working
// on converting to exceptions.
spNativeWebpImage->spDemuxer = std::shared_ptr<WebPDemuxerWrapper>(
new WebPDemuxerWrapper(std::move(spDemuxer), std::move(vBuffer)));
// Create the WebPImage with the native context.
// The native pointer is passed to the Java constructor as a jlong handle.
jobject ret = pEnv->NewObject(
sClazzWebPImage,
sWebPImageConstructor,
(jlong) spNativeWebpImage.get(),
(jint)spNativeWebpImage->pixelWidth,
(jint)spNativeWebpImage->pixelHeight,
(jint)spNativeWebpImage->numFrames,
(jint)spNativeWebpImage->durationMs,
frameDurationsArr,
(jint)spNativeWebpImage->loopCount,
(jint)spNativeWebpImage->backgroundColor);
if (ret != nullptr) {
// Ownership was transferred: release the unique_ptr so the Java object's
// handle keeps the native context alive (presumably freed later through a
// native dispose call driven by refCount — confirm against WebPImage.java).
spNativeWebpImage->refCount = 1;
spNativeWebpImage.release();
}
return ret;
}
這里就是在native創(chuàng)建WebpImage, 同時(shí)將byte buffer copy到native由WebpImage管理,同時(shí)native WebpImage 會(huì)創(chuàng)建一個(gè)java層的WebpImage供上層調(diào)用與之進(jìn)行JNI操作。所以真正處理webp圖片的功能在native WebpImage。
2.WebpDecoder初始化
WebpDecoder.java
// LRU cache of fully-rendered frame Bitmaps, keyed by frame index.
private final LruCache<Integer, Bitmap> mFrameBitmapCache;
/**
 * Builds a frame decoder around a parsed WebpImage.
 *
 * @param provider   pool used to obtain/release frame Bitmaps
 * @param webPImage  the parsed, native-backed image
 * @param rawData    the original encoded bytes
 * @param sampleSize downsample factor applied to every rendered frame
 */
public WebpDecoder(GifDecoder.BitmapProvider provider, WebpImage webPImage, ByteBuffer rawData,
int sampleSize) {
mBitmapProvider = provider;
mWebPImage = webPImage;
...
mTransparentFillPaint = new Paint();
...
// Cache of the Bitmap rendered for each animation frame.
mFrameBitmapCache = new LruCache<Integer, Bitmap>(MAX_FRAME_BITMAP_CACHE_SIZE) {
@Override
protected void entryRemoved(boolean evicted, Integer key, Bitmap oldValue, Bitmap newValue) {
// Return the cached frame bitmap to the provider so it can be reused.
if (oldValue != null) {
mBitmapProvider.release(oldValue);
}
}
};
setData(new GifHeader(), rawData, sampleSize);
}
3. webpDecoder.getNextFrame();
/**
 * Renders and returns the Bitmap for the current frame, replaying any
 * preceding frames needed to reconstruct the canvas state.
 *
 * @return the rendered frame, cached under its frame index
 */
@Override
public Bitmap getNextFrame() {
int frameNumber = getCurrentFrameIndex();
...
// Replay the frames between the last rendered index and the target frame so
// blend/dispose state accumulates correctly on the shared canvas.
for (int index = nextIndex; index < frameNumber; index++) {
WebpFrameInfo frameInfo = mFrameInfos[index];
// A frame that does not blend with its predecessor starts from the
// background color instead.
if (!frameInfo.blendPreviousFrame) {
disposeToBackground(canvas, frameInfo);
}
// render the previous frame
renderFrame(index, canvas);
if (Log.isLoggable(TAG, Log.DEBUG)) {
Log.d(TAG, "renderFrame, index=" + index + ", blend=" + frameInfo.blendPreviousFrame
+ ", dispose=" + frameInfo.disposeBackgroundColor);
}
// Frames flagged dispose-to-background are cleared after rendering.
if (frameInfo.disposeBackgroundColor) {
disposeToBackground(canvas, frameInfo);
}
}
...
// Then put the rendered frame into the BitmapCache
cacheFrameBitmap(frameNumber, bitmap);
return bitmap;
}
/**
 * Renders a single frame onto {@code canvas} at its (downsampled) offset.
 *
 * A scratch Bitmap is obtained from the provider, filled by the native frame
 * renderer, drawn onto the canvas, and always returned to the provider.
 *
 * @param frameNumber index of the frame to render
 * @param canvas      target canvas shared across the frame-replay loop
 */
private void renderFrame(int frameNumber, Canvas canvas) {
    WebpFrameInfo frameInfo = mFrameInfos[frameNumber];
    // All geometry is scaled down by the decoder's sample size.
    int frameWidth = frameInfo.width / sampleSize;
    int frameHeight = frameInfo.height / sampleSize;
    int xOffset = frameInfo.xOffset / sampleSize;
    int yOffset = frameInfo.yOffset / sampleSize;
    WebpFrame webpFrame = mWebPImage.getFrame(frameNumber);
    try {
        Bitmap frameBitmap = mBitmapProvider.obtain(frameWidth, frameHeight, mBitmapConfig);
        frameBitmap.eraseColor(Color.TRANSPARENT);
        try {
            webpFrame.renderFrame(frameWidth, frameHeight, frameBitmap);
            canvas.drawBitmap(frameBitmap, xOffset, yOffset, null);
        } finally {
            // FIX: release in finally so the scratch Bitmap is returned to the
            // provider even if native rendering or drawBitmap throws; the
            // original only released it on the success path.
            mBitmapProvider.release(frameBitmap);
        }
    } finally {
        // Native frame handle must always be disposed.
        webpFrame.dispose();
    }
}
通過WebpFrame獲取幀數(shù)據(jù),然后進(jìn)行渲染,最后由mFrameBitmapCache緩存Bitmap。
4.最終return WebpDrawableResource
這里前面先初始化了一個(gè)unitTransformation,Transformation作用是:Resource經(jīng)過Transformation轉(zhuǎn)化為TransformedResource(eg:轉(zhuǎn)化為圓角或者圓形)。這里初始化的unitTransformation直接返回Resource,不做任何處理。
return new WebpDrawableResource(new WebpDrawable(mContext, webpDecoder, mBitmapPool, unitTransformation, width, height, firstFrame));