Background:
The project requires video communication: the picture captured on device A must be forwarded to device B.
Deployment:
APP1: encodes the frames captured by the camera and sends the data to the server.
APP2: pulls the data from the server, decodes it and displays it.
Server: receives the data submitted by APP1 and forwards it to APP2.
Application notes:
APP1 opens the front camera, requests NV21 preview frames, and records a capture timestamp for every frame. The decoder needs that timestamp to keep playback continuous and smooth, without corrupted frames:

```java
camera = Camera.open(Camera.CameraInfo.CAMERA_FACING_FRONT);
Camera.Parameters parameters = camera.getParameters();
parameters.setPreviewFormat(ImageFormat.NV21);
parameters.setPreviewSize(width, height);
// Raise the exposure compensation (this adjusts exposure, not screen brightness)
parameters.setExposureCompensation(parameters.getMaxExposureCompensation() / 2);
camera.setParameters(parameters);
camera.setDisplayOrientation(90);
camera.setPreviewCallback(new Camera.PreviewCallback() {
    @Override
    public void onPreviewFrame(byte[] data, Camera camera) {
        // Grab the preview frame and record when it was captured; the
        // timestamp is needed at decode time to keep the video continuous
        // and smooth, without picture corruption.
        stamptime = System.nanoTime();
        yuv_data = data;
    }
});
```
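The callback only hands the frame over; encoding happens in the AvcKeyFrameEncoder class shown next, whose public yuv_data and stamptime fields the callback writes into. A minimal wiring sketch, assuming illustrative width/height/framerate values:

```java
// Sketch: connect the preview callback to the encoder's public fields,
// which the encoder thread polls. The 640x480@25 values are illustrative.
final AvcKeyFrameEncoder encoder = new AvcKeyFrameEncoder(640, 480, 25);
camera.setPreviewCallback(new Camera.PreviewCallback() {
    @Override
    public void onPreviewFrame(byte[] data, Camera camera) {
        encoder.stamptime = System.nanoTime();
        encoder.yuv_data = data;
    }
});
encoder.StartEncoderThread(saveVideo, null); // saveVideo: see ISaveVideo below
```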
The video encoder class (Encoder):

```java
import java.io.IOException;
import java.nio.ByteBuffer;

import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaFormat;

public class AvcKeyFrameEncoder {
    private final static String TAG = "MediaCodec";
    private int TIMEOUT_USEC = 12000;

    private MediaCodec mediaCodec;
    int m_width;
    int m_height;
    int m_framerate;

    // SPS/PPS configuration data emitted by the encoder; prepended to key frames.
    public byte[] configbyte;

    // Latest preview frame waiting to be encoded, with its capture timestamp.
    public byte[] yuv_data = null;
    public long stamptime = 0;

    public AvcKeyFrameEncoder(int width, int height, int framerate) {
        m_width = width;
        m_height = height;
        m_framerate = framerate;

        // The encoded output is landscape, because the camera's raw data is
        // landscape by default. If you rotate the frames by 90 or 270 degrees,
        // swap width and height here, or the picture will be corrupted:
        // a 320x240 image becomes 240x320 after a 90-degree rotation.
        MediaFormat mediaFormat = MediaFormat.createVideoFormat("video/avc", width, height);
        mediaFormat.setInteger(MediaFormat.KEY_COLOR_FORMAT,
                MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420SemiPlanar);
        mediaFormat.setInteger(MediaFormat.KEY_BIT_RATE, 125000);
        mediaFormat.setInteger(MediaFormat.KEY_FRAME_RATE, framerate); // e.g. 30
        mediaFormat.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 1);
        try {
            mediaCodec = MediaCodec.createEncoderByType("video/avc");
        } catch (IOException e) {
            e.printStackTrace();
        }

        // Configure and start the encoder.
        mediaCodec.configure(mediaFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        mediaCodec.start();
    }

    public void StopEncoder() {
        try {
            mediaCodec.stop();
            mediaCodec.release();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public boolean isRuning = false;

    public void StartEncoderThread(final ISaveVideo saveVideo, final ICall callback) {
        isRuning = true;
        new Thread(new Runnable() {
            @Override
            public void run() {
                byte[] input = null;
                long pts = 0;
                while (isRuning) {
                    // Wait for the next frame from the preview callback.
                    if (yuv_data == null) {
                        continue;
                    }

                    // Take the pending frame and convert NV21 to NV12, which is
                    // what COLOR_FormatYUV420SemiPlanar expects.
                    input = yuv_data;
                    pts = stamptime;
                    yuv_data = null;
                    byte[] yuv420sp = new byte[m_width * m_height * 3 / 2];
                    NV21ToNV12(input, yuv420sp, m_width, m_height);
                    input = yuv420sp;

                    try {
                        ByteBuffer[] inputBuffers = mediaCodec.getInputBuffers();
                        ByteBuffer[] outputBuffers = mediaCodec.getOutputBuffers();
                        int inputBufferIndex = mediaCodec.dequeueInputBuffer(-1);
                        if (inputBufferIndex >= 0) {
                            ByteBuffer inputBuffer = inputBuffers[inputBufferIndex];
                            inputBuffer.clear();
                            // Put the converted YUV420 frame into the encoder's
                            // input buffer, tagged with the capture timestamp.
                            inputBuffer.put(input);
                            mediaCodec.queueInputBuffer(inputBufferIndex, 0, input.length, pts, 0);
                        }

                        MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
                        int outputBufferIndex = mediaCodec.dequeueOutputBuffer(bufferInfo, TIMEOUT_USEC);
                        while (outputBufferIndex >= 0) {
                            // Log.i("AvcEncoder", "Get H264 Buffer Success! flag = "
                            //         + bufferInfo.flags + ", pts = " + bufferInfo.presentationTimeUs);
                            ByteBuffer outputBuffer = outputBuffers[outputBufferIndex];
                            byte[] outData = new byte[bufferInfo.size];
                            outputBuffer.get(outData);
                            // flags is a bitmask, so test bits rather than compare with ==.
                            if ((bufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
                                // SPS/PPS; keep it for prepending to key frames.
                                configbyte = outData;
                            } else if ((bufferInfo.flags & MediaCodec.BUFFER_FLAG_KEY_FRAME) != 0) {
                                // Copy the encoded frame out of the output buffer,
                                // prepending SPS/PPS so the stream is decodable
                                // from this key frame onward.
                                byte[] keyframe = new byte[bufferInfo.size + configbyte.length];
                                System.arraycopy(configbyte, 0, keyframe, 0, configbyte.length);
                                System.arraycopy(outData, 0, keyframe, configbyte.length, outData.length);

                                Logs.i("Uploading I-frame " + keyframe.length);
                                // Packet layout: 1-byte type (0x01 = I-frame),
                                // 8-byte pts, 4-byte payload length, payload.
                                byte[] send_data = new byte[13 + keyframe.length];
                                send_data[0] = 0x01;
                                System.arraycopy(IntBytes.longToBytes(pts), 0, send_data, 1, 8);
                                System.arraycopy(IntBytes.intToByteArray(keyframe.length), 0, send_data, 9, 4);
                                System.arraycopy(keyframe, 0, send_data, 13, keyframe.length);
                                if (saveVideo != null) {
                                    saveVideo.SaveVideoData(send_data);
                                }
                                if (callback != null) {
                                    callback.callback(keyframe, pts);
                                }
                            } else {
                                // Non-key frame: type byte 0x02, same layout.
                                byte[] send_data = new byte[13 + outData.length];
                                send_data[0] = 0x02;
                                System.arraycopy(IntBytes.longToBytes(pts), 0, send_data, 1, 8);
                                System.arraycopy(IntBytes.intToByteArray(outData.length), 0, send_data, 9, 4);
                                System.arraycopy(outData, 0, send_data, 13, outData.length);
                                if (saveVideo != null) {
                                    saveVideo.SaveVideoData(send_data);
                                }
                                if (callback != null) {
                                    callback.callback(outData, pts);
                                }
                            }

                            mediaCodec.releaseOutputBuffer(outputBufferIndex, false);
                            outputBufferIndex = mediaCodec.dequeueOutputBuffer(bufferInfo, TIMEOUT_USEC);
                        }
                    } catch (Throwable t) {
                        t.printStackTrace();
                        break;
                    }
                }
            }
        }).start();
    }

    // NV21 and NV12 share the same Y plane; only the interleaved chroma bytes
    // differ (NV21 stores VU pairs, NV12 stores UV pairs), so copy the Y plane
    // once and swap each chroma pair.
    private void NV21ToNV12(byte[] nv21, byte[] nv12, int width, int height) {
        if (nv21 == null || nv12 == null) return;
        int framesize = width * height;
        System.arraycopy(nv21, 0, nv12, 0, framesize);
        for (int j = 0; j < framesize / 2; j += 2) {
            nv12[framesize + j] = nv21[framesize + j + 1];     // U
            nv12[framesize + j + 1] = nv21[framesize + j];     // V
        }
    }
}
```
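IntBytes and Logs are project helpers that the post does not show (Logs appears to be a thin logging wrapper). A minimal IntBytes consistent with the call sites above, assuming big-endian byte order on both ends, might look like this:

```java
import java.nio.ByteBuffer;

// Hypothetical helper inferred from its call sites; the original post does
// not show it. Big-endian order is an assumption and must match on both ends.
public class IntBytes {
    public static byte[] longToBytes(long value) {
        return ByteBuffer.allocate(8).putLong(value).array();
    }

    public static byte[] intToByteArray(int value) {
        return ByteBuffer.allocate(4).putInt(value).array();
    }

    public static long bytesToLong(byte[] bytes) {
        return ByteBuffer.wrap(bytes).getLong();
    }
}
```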
The class relies on callback interfaces to pass the captured and encoded data outward: a worker thread submits it to the server, or it is decoded and displayed locally so the encode/decode latency can be inspected.
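The ISaveVideo and ICall interfaces themselves are not shown in the post; a minimal sketch consistent with how the encoder calls them:

```java
// Hypothetical definitions inferred from the encoder's call sites.
public interface ISaveVideo {
    // Receives a framed packet (1-byte type + 8-byte pts + 4-byte length + payload).
    void SaveVideoData(byte[] data);
}

public interface ICall {
    // Receives the bare encoded frame and its capture timestamp.
    void callback(byte[] data, long pts);
}
```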
An ArrayBlockingQueue<byte[]> H264Queue = new ArrayBlockingQueue<byte[]>(10); is used to buffer the data handed over through these interfaces, so that a background thread can decode it or submit it to the server.
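A sketch of that producer/consumer hand-off, assuming a dedicated sender thread drains the queue (the class name and everything except H264Queue are illustrative):

```java
import java.util.concurrent.ArrayBlockingQueue;

// Hypothetical pipeline wrapper around the queue described above.
public class UploadPipeline {
    // Bounded queue: at most 10 encoded frames waiting to be sent.
    final ArrayBlockingQueue<byte[]> H264Queue = new ArrayBlockingQueue<byte[]>(10);

    // Producer side: the encoder thread calls this through ISaveVideo.
    final ISaveVideo saveVideo = new ISaveVideo() {
        @Override
        public void SaveVideoData(byte[] data) {
            // offer() drops the frame when the queue is full instead of
            // blocking the encoder thread.
            H264Queue.offer(data);
        }
    };

    // Consumer side: a background thread drains the queue.
    void startSender() {
        new Thread(new Runnable() {
            @Override
            public void run() {
                while (true) {
                    try {
                        byte[] packet = H264Queue.take(); // blocks until a frame arrives
                        // ... send packet to the server, or decode it locally ...
                    } catch (InterruptedException e) {
                        break;
                    }
                }
            }
        }).start();
    }
}
```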
APP2: connects to the server and starts consuming from an I-frame (the server only stores data beginning with the latest I-frame). The capture timestamp recorded earlier must also be passed along, as the timestamp argument (the fourth parameter) of the MediaCodec object's queueInputBuffer method.
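ReceiveVideoThread, which fills H264Queue on the APP2 side, is not shown in the post. Based on the packet layout APP1 produces, its read loop must do something like the following sketch (socket setup omitted; big-endian order assumed, matching the IntBytes sketch above):

```java
import java.io.DataInputStream;
import java.net.Socket;
import java.nio.ByteBuffer;
import java.util.concurrent.ArrayBlockingQueue;

// Hypothetical stand-in for ReceiveVideoThread (not shown in the post).
public class ReceiveVideoThreadSketch implements Runnable {
    public final ArrayBlockingQueue<byte[]> H264Queue = new ArrayBlockingQueue<byte[]>(10);
    private final Socket socket;
    private volatile boolean stopped = false;

    public ReceiveVideoThreadSketch(Socket socket) {
        this.socket = socket;
    }

    public void StopReceive() {
        stopped = true;
    }

    @Override
    public void run() {
        try {
            DataInputStream in = new DataInputStream(socket.getInputStream());
            while (!stopped) {
                byte type = in.readByte();    // 0x01 = I-frame, 0x02 = other frames
                byte[] header = new byte[12];
                in.readFully(header);         // 8-byte pts + 4-byte payload length
                int length = ByteBuffer.wrap(header, 8, 4).getInt();
                // Re-attach the 8-byte pts prefix that decodeLoop_New() strips off.
                byte[] packet = new byte[8 + length];
                System.arraycopy(header, 0, packet, 0, 8);
                in.readFully(packet, 8, length);
                H264Queue.offer(packet);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
```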
Server: receives APP1's data frame by frame. It starts recording at an I-frame and discards any data that does not begin with an I-frame, keeping only one frame at a time: it reads a frame, removes it from the buffer, and sends it on to APP2 in a loop.
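The server code is not shown; here is a minimal sketch of the keep-one-frame policy just described (Java is used for consistency, though the post does not say what the server is written in):

```java
import java.io.OutputStream;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

// Illustrative server-side relay: drop everything until the first I-frame,
// then keep at most one pending frame for APP2.
public class RelayBuffer {
    private final BlockingQueue<byte[]> pending = new ArrayBlockingQueue<byte[]>(1);
    private boolean seenKeyFrame = false;

    // Called for every framed packet arriving from APP1.
    public synchronized void onFrameFromApp1(byte[] packet) {
        if (!seenKeyFrame) {
            if (packet[0] != 0x01) return; // discard until an I-frame arrives
            seenKeyFrame = true;
        }
        pending.poll();        // drop the stale frame if APP2 is slow
        pending.offer(packet); // keep exactly one frame
    }

    // Loop that pushes frames to APP2: read, remove, send.
    public void sendLoop(OutputStream toApp2) throws Exception {
        while (true) {
            byte[] packet = pending.take();
            toApp2.write(packet);
            toApp2.flush();
        }
    }
}
```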
The video decoder class (Decoder):

```java
import java.io.IOException;
import java.nio.ByteBuffer;

import android.graphics.ImageFormat;
import android.graphics.Rect;
import android.media.Image;
import android.media.MediaCodec;
import android.media.MediaFormat;
import android.os.Build;
import android.view.Surface;

import androidx.annotation.RequiresApi;

public class VideoDecoder {
    private Thread mDecodeThread;
    private MediaCodec mCodec;
    private boolean mStopFlag = false;
    private int Video_Width = 640;
    private int Video_Height = 480;
    private int FrameRate = 25;
    private Boolean isUsePpsAndSps = false;
    private ReceiveVideoThread runThread = null;

    public VideoDecoder(String ip, int port, byte type, int roomId) {
        runThread = new ReceiveVideoThread(ip, port, type, roomId);
        new Thread(runThread).start();
    }

    public void InitReadData(Surface surface) {
        try {
            // Create a decoder for the given MIME type.
            mCodec = MediaCodec.createDecoderByType("video/avc");
        } catch (IOException e) {
            e.printStackTrace();
        }

        // Initialize the decoder format.
        final MediaFormat mediaformat = MediaFormat.createVideoFormat("video/avc", Video_Width, Video_Height);
        // Set the frame rate.
        mediaformat.setInteger(MediaFormat.KEY_FRAME_RATE, FrameRate);
        // https://developer.android.com/reference/android/media/MediaFormat.html#KEY_MAX_INPUT_SIZE
        // configure() parameters:
        //   format  - the input format for a decoder, or the output format for an encoder
        //   surface - a surface on which to render the decoded output
        //   crypto  - a crypto object, if the media data is encrypted
        //   flags   - pass CONFIGURE_FLAG_ENCODE when the object is used as an encoder
        mCodec.configure(mediaformat, surface, null, 0);
        startDecodingThread();
    }

    private void startDecodingThread() {
        mCodec.start();
        mDecodeThread = new Thread(new decodeH264Thread());
        mDecodeThread.start();
    }

    @RequiresApi(api = Build.VERSION_CODES.LOLLIPOP)
    private class decodeH264Thread implements Runnable {
        @Override
        public void run() {
            try {
                // saveDataLoop();
                decodeLoop_New();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }

        private void decodeLoop_New() {
            // Input buffers for the data to decode.
            ByteBuffer[] inputBuffers = mCodec.getInputBuffers();
            // Metadata for each decoded buffer, e.g. its offset and the size
            // of the data that is valid for the codec.
            MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
            long timeoutUs = 1000;
            byte[] marker0 = new byte[]{0, 0, 0, 1}; // H.264 start code
            byte[] dummyFrame = new byte[]{0x00, 0x00, 0x01, 0x20};
            byte[] streamBuffer = null;
            while (true) {
                if (runThread.H264Queue.size() > 0) {
                    streamBuffer = runThread.H264Queue.poll();
                } else {
                    try {
                        Thread.sleep(20);
                    } catch (Exception ex) {
                    }
                    continue;
                }

                // The first 8 bytes carry the capture timestamp; the rest is
                // the H.264 payload.
                byte[] time_data = new byte[8];
                System.arraycopy(streamBuffer, 0, time_data, 0, 8);
                long pts = IntBytes.bytesToLong(time_data);
                byte[] video_data = new byte[streamBuffer.length - 8];
                System.arraycopy(streamBuffer, 8, video_data, 0, video_data.length);
                streamBuffer = video_data;
                Logs.i("Got streamBuffer " + streamBuffer.length + " pts " + pts);

                int bytes_cnt = 0;
                mStopFlag = false;
                while (mStopFlag == false) {
                    bytes_cnt = streamBuffer.length;
                    if (bytes_cnt == 0) {
                        streamBuffer = dummyFrame;
                    }

                    int startIndex = 0;
                    int remaining = bytes_cnt;
                    while (true) {
                        if (remaining == 0 || startIndex >= remaining) {
                            break;
                        }
                        // Split the buffer into NAL units at the next start code.
                        int nextFrameStart = KMPMatch(marker0, streamBuffer, startIndex + 2, remaining);
                        if (nextFrameStart == -1) {
                            nextFrameStart = remaining;
                        }

                        int inIndex = mCodec.dequeueInputBuffer(timeoutUs);
                        if (inIndex >= 0) {
                            ByteBuffer byteBuffer = inputBuffers[inIndex];
                            byteBuffer.clear();
                            byteBuffer.put(streamBuffer, startIndex, nextFrameStart - startIndex);
                            // After filling the input buffer at this index, hand the
                            // data to the decoder, passing the capture timestamp as
                            // the fourth argument.
                            mCodec.queueInputBuffer(inIndex, 0, nextFrameStart - startIndex, pts, 0);
                            startIndex = nextFrameStart;
                        } else {
                            continue;
                        }

                        int outIndex = mCodec.dequeueOutputBuffer(info, timeoutUs);
                        if (outIndex >= 0) {
                            // Frame pacing is not done here, since the raw H.264
                            // stream carries no usable PTS:
                            /*
                            while (info.presentationTimeUs / 1000 > System.currentTimeMillis() - startMs) {
                                try {
                                    Thread.sleep(100);
                                } catch (InterruptedException e) {
                                    e.printStackTrace();
                                }
                            }
                            */
                            boolean doRender = (info.size != 0);
                            // TODO: add handling to save the raw frame data
                            if (doRender) {
                                Image image = mCodec.getOutputImage(outIndex);
                                if (image != null) {
                                    // Extract the pixels and forward them to the
                                    // consuming interface.
                                    byte[] data = getDataFromImage(image, COLOR_FormatNV21);
                                }
                            }
                            // Once done with the output buffer, return it to the codec.
                            mCodec.releaseOutputBuffer(outIndex, doRender);
                        } else {
                            // Log.e(TAG, "bbbb");
                        }
                    }
                    mStopFlag = true;
                }
                // Logs.i("Time to process one frame: " + (System.currentTimeMillis() - c_start));
            }
        }
    }

    private static final boolean VERBOSE = false;
    private static final long DEFAULT_TIMEOUT_US = 10000;
    private static final int COLOR_FormatI420 = 1;
    private static final int COLOR_FormatNV21 = 2;

    private static boolean isImageFormatSupported(Image image) {
        int format = image.getFormat();
        switch (format) {
            case ImageFormat.YUV_420_888:
            case ImageFormat.NV21:
            case ImageFormat.YV12:
                return true;
        }
        return false;
    }

    @RequiresApi(api = Build.VERSION_CODES.LOLLIPOP)
    private static byte[] getDataFromImage(Image image, int colorFormat) {
        if (colorFormat != COLOR_FormatI420 && colorFormat != COLOR_FormatNV21) {
            throw new IllegalArgumentException("only support COLOR_FormatI420 and COLOR_FormatNV21");
        }
        if (!isImageFormatSupported(image)) {
            throw new RuntimeException("can't convert Image to byte array, format " + image.getFormat());
        }
        Rect crop = image.getCropRect();
        int format = image.getFormat();
        int width = crop.width();
        int height = crop.height();
        Image.Plane[] planes = image.getPlanes();
        byte[] data = new byte[width * height * ImageFormat.getBitsPerPixel(format) / 8];
        byte[] rowData = new byte[planes[0].getRowStride()];
        if (VERBOSE) Logs.i("get data from " + planes.length + " planes");
        int channelOffset = 0;
        int outputStride = 1;
        for (int i = 0; i < planes.length; i++) {
            switch (i) {
                case 0:
                    channelOffset = 0;
                    outputStride = 1;
                    break;
                case 1:
                    if (colorFormat == COLOR_FormatI420) {
                        channelOffset = width * height;
                        outputStride = 1;
                    } else if (colorFormat == COLOR_FormatNV21) {
                        channelOffset = width * height + 1;
                        outputStride = 2;
                    }
                    break;
                case 2:
                    if (colorFormat == COLOR_FormatI420) {
                        channelOffset = (int) (width * height * 1.25);
                        outputStride = 1;
                    } else if (colorFormat == COLOR_FormatNV21) {
                        channelOffset = width * height;
                        outputStride = 2;
                    }
                    break;
            }
            ByteBuffer buffer = planes[i].getBuffer();
            int rowStride = planes[i].getRowStride();
            int pixelStride = planes[i].getPixelStride();
            if (VERBOSE) {
                Logs.i("pixelStride " + pixelStride);
                Logs.i("rowStride " + rowStride);
                Logs.i("width " + width);
                Logs.i("height " + height);
                Logs.i("buffer size " + buffer.remaining());
            }
            // The luma plane is full resolution; chroma planes are subsampled 2x.
            int shift = (i == 0) ? 0 : 1;
            int w = width >> shift;
            int h = height >> shift;
            buffer.position(rowStride * (crop.top >> shift) + pixelStride * (crop.left >> shift));
            for (int row = 0; row < h; row++) {
                int length;
                if (pixelStride == 1 && outputStride == 1) {
                    length = w;
                    buffer.get(data, channelOffset, length);
                    channelOffset += length;
                } else {
                    length = (w - 1) * pixelStride + 1;
                    buffer.get(rowData, 0, length);
                    for (int col = 0; col < w; col++) {
                        data[channelOffset] = rowData[col * pixelStride];
                        channelOffset += outputStride;
                    }
                }
                if (row < h - 1) {
                    buffer.position(buffer.position() + rowStride - length);
                }
            }
            if (VERBOSE) Logs.i("Finished reading data from plane " + i);
        }
        return data;
    }

    private int KMPMatch(byte[] pattern, byte[] bytes, int start, int remain) {
        // Note: this sleep throttles the NAL-splitting loop; it is not part of
        // the KMP algorithm itself.
        try {
            Thread.sleep(30);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        int[] lsp = computeLspTable(pattern);
        int j = 0; // Number of bytes matched in pattern
        for (int i = start; i < remain; i++) {
            while (j > 0 && bytes[i] != pattern[j]) {
                // Fall back in the pattern
                j = lsp[j - 1]; // Strictly decreasing
            }
            if (bytes[i] == pattern[j]) {
                // Next byte matched, advance the position
                j++;
                if (j == pattern.length) return i - (j - 1);
            }
        }
        return -1; // Not found
    }

    private int[] computeLspTable(byte[] pattern) {
        int[] lsp = new int[pattern.length];
        lsp[0] = 0; // Base case
        for (int i = 1; i < pattern.length; i++) {
            // Start by assuming we're extending the previous LSP
            int j = lsp[i - 1];
            while (j > 0 && pattern[i] != pattern[j]) j = lsp[j - 1];
            if (pattern[i] == pattern[j]) j++;
            lsp[i] = j;
        }
        return lsp;
    }

    public void StopDecode() {
        if (runThread != null) {
            runThread.StopReceive();
        }
    }
}
```
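Wiring the decoder to a view could look like the following sketch, assuming an Activity with a surfaceView field; the constructor argument values are made up:

```java
// Illustrative usage: start receiving, then attach the decoder to the
// SurfaceView once its surface exists. IP/port/type/roomId are placeholders.
final VideoDecoder decoder = new VideoDecoder("192.168.1.10", 9000, (byte) 0x02, 1);
surfaceView.getHolder().addCallback(new SurfaceHolder.Callback() {
    @Override
    public void surfaceCreated(SurfaceHolder holder) {
        decoder.InitReadData(holder.getSurface());
    }

    @Override
    public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
    }

    @Override
    public void surfaceDestroyed(SurfaceHolder holder) {
        decoder.StopDecode();
    }
});
```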
Summary:
Working through this video pipeline taught me a number of details of video handling on Android, and deepened my understanding of programming against interfaces (inverting dependencies) in a real project.