My boss asked for an Android app that renders images with shaders. Since it is Android, OpenGL ES conveniently exposes a Java API, and most of the sample code on GitHub is written in Java, so staying within one language makes it easy to get a first triangle on screen. Even so, I recommend writing the OpenGL-related code in C++, for two reasons: 1. the official OpenGL tutorials are C++-based, so beginners can look up function calls easily; 2. I do image processing, and sooner or later a library like OpenCV may come into play... So, after two days of suffering through the basics, this post records the NDK implementation logic and how to build some basic OpenGL rendering effects on top of the NDK.
First, for learning the OpenGL API itself I strongly recommend learnOpenGL: every topic comes with an example plus source code, it progresses step by step, and the examples gradually clear up many of the things that confuse beginners.
I am also very grateful to the bloggers below for sharing their work; they helped enormously whenever I got stuck, since there really is not much material online.
You may want to read their code first — I am mostly just porting it here, haha ~:
NDK

The Java side consists of three classes: MainActivity, MyGLSurfaceView and MyRenderer. The main activity displays whatever MyGLSurfaceView renders, and the actual rendering is defined by MyRenderer. A class derived from GLSurfaceView.Renderer must override three methods: onSurfaceCreated (initialization), onSurfaceChanged (the window size changed) and onDrawFrame (rendering). These three methods are implemented in C++, so on the Java side they are simple forwarding calls to the corresponding methods of the NativeImpl class.
package com.example.c_ndktest;

import androidx.appcompat.app.AppCompatActivity;

import android.content.Context;
import android.opengl.GLSurfaceView;
import android.os.Bundle;

import com.example.c_ndktest.databinding.ActivityMainBinding;

import javax.microedition.khronos.egl.EGLConfig;
import javax.microedition.khronos.opengles.GL10;

public class MainActivity extends AppCompatActivity {
    private GLSurfaceView gLView;
    private MyRenderer renderer;
    // public MyRenderer mrenderer;

    // Used to load the 'c_ndktest' library on application startup.
    static {
        System.loadLibrary("native-lib");
    }

    private ActivityMainBinding binding;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        gLView = new MyGLSurfaceView(this);
        setContentView(gLView);
    }

    /**
     * A native method that is implemented by the 'c_ndktest' native library,
     * which is packaged with this application.
     */
    public native String stringFromJNI();

    private class MyGLSurfaceView extends GLSurfaceView {
        public MyGLSurfaceView(Context context) {
            super(context);
            setEGLContextClientVersion(3);
            renderer = new MyRenderer();
            setRenderer(renderer);
        }
    }

    private class MyRenderer implements GLSurfaceView.Renderer {
        @Override
        public void onSurfaceCreated(GL10 gl10, EGLConfig eglConfig) {
            NativeImpl.init();
        }

        @Override
        public void onSurfaceChanged(GL10 gl10, int width, int height) {
            NativeImpl.OnSurfaceChanged(width, height);
        }

        @Override
        public void onDrawFrame(GL10 gl10) {
            NativeImpl.draw();
        }
    }
}
package com.example.c_ndktest;

public class NativeImpl {
    static {
        System.loadLibrary("native-lib");
    }

    public static native String stringFromJNI();
    public static native void init();
    public static native void OnSurfaceChanged(int width, int height);
    public static native void draw();
}
The static block names the shared library built from the C++ files so that it gets loaded; the library name itself is defined in CMakeLists. If the native methods have no matching definition, the IDE reports an error; once they are defined, a marker should appear in the gutter next to each native method, and clicking it jumps to the C++ implementation in the cpp file.


First, include the header of the class that actually renders, TriangleDemo.h, and define a global object triangleDemo; the native layer then simply calls its member functions to implement the interface. Note the naming convention of the JNI functions: Java_packageName_JavaClassName_methodName(){}.
#include <jni.h>
#include <string>
#include "demo/TriangleDemo.h"

TriangleDemo *triangleDemo = NULL;

extern "C" JNIEXPORT jstring JNICALL
Java_com_example_c_1ndktest_MainActivity_stringFromJNI(
        JNIEnv *env,
        jobject clazz) {
    std::string hello = "Hello NativeImpl from C++";
    return env->NewStringUTF(hello.c_str());
}

extern "C" JNIEXPORT void JNICALL
Java_com_example_c_1ndktest_NativeImpl_init(JNIEnv *env, jclass instance) {
    if (triangleDemo == NULL) {
        triangleDemo = new TriangleDemo();
    }
    triangleDemo->Init();
}

extern "C" JNIEXPORT void JNICALL
Java_com_example_c_1ndktest_NativeImpl_OnSurfaceChanged(JNIEnv *env, jclass instance, jint width, jint height) {
    triangleDemo->OnSurfaceChanged(width, height);
}

extern "C" JNIEXPORT void JNICALL
Java_com_example_c_1ndktest_NativeImpl_draw(JNIEnv *env, jclass instance) {
    triangleDemo->draw();
}
The class defines a few handle member variables, defaulting to 0 (i.e. "null" in GL terms). Its member functions are the three methods above; in addition there are a vertex array object (VAO) and a vertex buffer object (VBO).
#ifndef C_NDKTEST_TRIANGLEDEMO_H
#define C_NDKTEST_TRIANGLEDEMO_H

#include <GLES3/gl3.h>

class TriangleDemo {
public:
    TriangleDemo() {
        m_ProgramObj = 0;
        m_VertexShader = 0;
        m_FragmentShader = 0;
    };

    void Init();
    void draw();
    void OnSurfaceChanged(int width, int height);

    GLuint m_ProgramObj;
    GLuint m_VertexShader;
    GLuint m_FragmentShader;
    GLuint VBO, VAO;
};

#endif //C_NDKTEST_TRIANGLEDEMO_H
Finally, the actual rendering code! It has been quite a journey...
TriangleDemo::Init()

The flow for creating the VAO and VBO (it is also visible in Init() below).
An example of multiple programs paired with multiple VAOs:

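A minimal sketch of the pairing (m_ProgramObj2 and VAO2 are hypothetical names, not part of this post's demo code): each program is matched with its own VAO, and at draw time you bind the pair before issuing the draw call.

glUseProgram(m_ProgramObj);        // program 1 ...
glBindVertexArray(VAO);            // ... draws with its own VAO
glDrawArrays(GL_TRIANGLES, 0, 3);

glUseProgram(m_ProgramObj2);       // hypothetical program 2 ...
glBindVertexArray(VAO2);           // ... draws with another VAO
glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);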
I will not go further than that; I did not define any transformation matrices, the vertex data is just assigned directly.
#include "../util/logUtil.h"
#include "../util/GLUtil.h"
#include "TriangleDemo.h"
#include <GLES3/gl3.h>

void TriangleDemo::draw() {
    if (m_ProgramObj == 0)
        return;
    glClearColor(1.0, 0.5, 1.0, 1.0);
    glClear(GL_STENCIL_BUFFER_BIT | GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glUseProgram(m_ProgramObj);
    glBindVertexArray(VAO);
    glDrawArrays(GL_TRIANGLES, 0, 3);
    glBindVertexArray(0);
}

void TriangleDemo::Init() {
    if (m_ProgramObj != 0) return;
    char vShaderStr[] =
            "#version 300 es                             \n"
            "layout(location = 0) in vec3 vPosition;     \n"
            "layout(location = 1) in vec3 vColor;        \n"
            "out vec3 ourColor;                          \n"
            "void main()                                 \n"
            "{                                           \n"
            "    gl_Position = vec4(vPosition.xyz, 1.0); \n"
            "    ourColor = vColor;                      \n"
            "}                                           \n";
    char fShaderStr[] =
            "#version 300 es                             \n"
            "precision mediump float;                    \n"
            "out vec4 fragColor;                         \n"
            "in vec3 ourColor;                           \n"
            "void main()                                 \n"
            "{                                           \n"
            "    fragColor = vec4(ourColor.rgb, 1.0);    \n"
            "}                                           \n";
    LOGCATE("init TriangleDemo");
    float vertices[] = {
            // positions        // colors
             0.5f, -0.5f, 0.0f, 1.0f, 0.0f, 0.0f,  // bottom right
            -0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 0.0f,  // bottom left
             0.0f,  0.5f, 0.0f, 0.0f, 0.0f, 1.0f   // top
    };
    glClearColor(1.0, 0.5, 1.0, 1.0);
    m_ProgramObj = GLUtil::CreateProgram(vShaderStr, fShaderStr, m_VertexShader, m_FragmentShader);
    glUseProgram(m_ProgramObj);

    glGenVertexArrays(1, &VAO);
    glGenBuffers(1, &VBO);
    glBindVertexArray(VAO);
    glBindBuffer(GL_ARRAY_BUFFER, VBO);
    glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
    // position attribute
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(float), (void *) 0);
    glEnableVertexAttribArray(0);
    // color attribute
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(float), (void *) (3 * sizeof(float)));
    glEnableVertexAttribArray(1);
}

void TriangleDemo::OnSurfaceChanged(int width, int height) {
    LOGCATE("MyGLRenderContext::OnSurfaceChanged [w, h] = [%d, %d]", width, height);
    glViewport(0, 0, width, height);
}
#ifndef OPENGLDEMO_GLUTIL_H
#define OPENGLDEMO_GLUTIL_H

#include <GLES3/gl3.h>

class GLUtil {
public:
    static GLuint CreateProgram(const char *pVertexShaderSource, const char *pFragShaderSource,
                                GLuint &vertexShaderHandle,
                                GLuint &fragShaderHandle);

    static GLuint LoadShader(GLenum shaderType, const char *pSource);

    static void CheckGLError(const char *pGLOperation);

    static void test();
};

#endif //OPENGLDEMO_GLUTIL_H
This is mainly the shader compile-and-link boilerplate, so I will not walk through it.
//
// Created by Lai on 2020/11/8.
//
#include <cstdlib>   // for malloc/free
#include "GLUtil.h"
#include "logUtil.h"

GLuint GLUtil::CreateProgram(const char *pVertexShaderSource, const char *pFragShaderSource,
                             GLuint &vertexShaderHandle, GLuint &fragShaderHandle) {
    GLuint program = 0;
    FUN_BEGIN_TIME("GLUtils::CreateProgram");
    vertexShaderHandle = LoadShader(GL_VERTEX_SHADER, pVertexShaderSource);
    if (!vertexShaderHandle) return program;

    fragShaderHandle = LoadShader(GL_FRAGMENT_SHADER, pFragShaderSource);
    if (!fragShaderHandle) return program;

    program = glCreateProgram();
    if (program)
    {
        glAttachShader(program, vertexShaderHandle);
        CheckGLError("glAttachShader");
        glAttachShader(program, fragShaderHandle);
        CheckGLError("glAttachShader");
        glLinkProgram(program);
        GLint linkStatus = GL_FALSE;
        // Returns the requested object parameter.
        glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);

        // Detaches a shader object from a program object to which it is attached;
        // after linking, the shaders can be deleted.
        glDetachShader(program, vertexShaderHandle);
        glDeleteShader(vertexShaderHandle);
        vertexShaderHandle = 0;
        glDetachShader(program, fragShaderHandle);
        glDeleteShader(fragShaderHandle);
        fragShaderHandle = 0;

        if (linkStatus != GL_TRUE)
        {
            GLint bufLength = 0;
            glGetProgramiv(program, GL_INFO_LOG_LENGTH, &bufLength);
            if (bufLength)
            {
                char* buf = (char*) malloc((size_t) bufLength);
                if (buf)
                {
                    glGetProgramInfoLog(program, bufLength, NULL, buf);
                    LOGCATE("GLUtils::CreateProgram Could not link program:\n%s\n", buf);
                    free(buf);
                }
            }
            glDeleteProgram(program);
            program = 0;
        }
    }
    FUN_END_TIME("GLUtils::CreateProgram")
    LOGCATE("GLUtils::CreateProgram program = %d", program);
    return program;
}

void GLUtil::test() {
    LOGCATE("TEST!!!!!");
}

GLuint GLUtil::LoadShader(GLenum shaderType, const char *pSource)
{
    GLuint shader = 0;
    FUN_BEGIN_TIME("GLUtils::LoadShader")
    shader = glCreateShader(shaderType);
    if (shader)
    {
        glShaderSource(shader, 1, &pSource, NULL);
        glCompileShader(shader);
        GLint compiled = 0;
        glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
        if (!compiled)
        {
            GLint infoLen = 0;
            glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen);
            if (infoLen)
            {
                char* buf = (char*) malloc((size_t) infoLen);
                if (buf)
                {
                    glGetShaderInfoLog(shader, infoLen, NULL, buf);
                    LOGCATE("GLUtils::LoadShader Could not compile shader %d:\n%s\n", shaderType, buf);
                    free(buf);
                }
                glDeleteShader(shader);
                shader = 0;
            }
        }
    }
    FUN_END_TIME("GLUtils::LoadShader")
    return shader;
}

void GLUtil::CheckGLError(const char *pGLOperation)
{
    for (GLint error = glGetError(); error; error = glGetError())
    {
        LOGCATE("GLUtils::CheckGLError GL Operation %s() glError (0x%x)\n", pGLOperation, error);
    }
}
The logging macro definitions, which turn out to be quite important.
#ifndef OPENGLDEMO_LOGUTIL_H
#define OPENGLDEMO_LOGUTIL_H

#include <android/log.h>
#include <sys/time.h>

#define LOG_TAG "OpenGlDemo"

#define LOGCATE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)
#define LOGCATV(...) __android_log_print(ANDROID_LOG_VERBOSE,LOG_TAG,__VA_ARGS__)
#define LOGCATD(...) __android_log_print(ANDROID_LOG_DEBUG,LOG_TAG,__VA_ARGS__)
#define LOGCATI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)

#define FUN_BEGIN_TIME(FUN) {\
    LOGCATE("%s:%s func start", __FILE__, FUN); \
    long long t0 = GetSysCurrentTime();

#define FUN_END_TIME(FUN) \
    long long t1 = GetSysCurrentTime(); \
    LOGCATE("%s:%s func cost time %ldms", __FILE__, FUN, (long)(t1-t0));}

#define BEGIN_TIME(FUN) {\
    LOGCATE("%s func start", FUN); \
    long long t0 = GetSysCurrentTime();

#define END_TIME(FUN) \
    long long t1 = GetSysCurrentTime(); \
    LOGCATE("%s func cost time %ldms", FUN, (long)(t1-t0));}

static long long GetSysCurrentTime()
{
    struct timeval time;
    gettimeofday(&time, NULL);
    long long curTime = ((long long)(time.tv_sec)) * 1000 + time.tv_usec / 1000;
    return curTime;
}

#define GO_CHECK_GL_ERROR(...) LOGCATE("CHECK_GL_ERROR %s glGetError = %d, line = %d, ", __FUNCTION__, glGetError(), __LINE__)
#define DEBUG_LOGCATE(...) LOGCATE("DEBUG_LOGCATE %s line = %d", __FUNCTION__, __LINE__)

#endif //OPENGLDEMO_LOGUTIL_H
# For more information about using CMake with Android Studio, read the
# documentation: https://d.android.com/studio/projects/add-native-code.html

# Sets the minimum version of CMake required to build the native library.
cmake_minimum_required(VERSION 3.18.1)

# Declares and names the project.
project("c_ndktest")

file(GLOB_RECURSE ALL_SOURCE "*.cpp" "*.h")

include_directories(
        util
        demo
)

# Creates and names a library, sets it as either STATIC
# or SHARED, and provides the relative paths to its source code.
# You can define multiple libraries, and CMake builds them for you.
# Gradle automatically packages shared libraries with your APK.
add_library( # Sets the name of the library.
        native-lib
        # Sets the library as a shared library.
        SHARED
        # Provides a relative path to your source file(s).
        # native-lib.cpp
        ${ALL_SOURCE}
        demo/TriangleDemo.cpp)

# Searches for a specified prebuilt library and stores the path as a
# variable. Because CMake includes system libraries in the search path by
# default, you only need to specify the name of the public NDK library
# you want to add. CMake verifies that the library exists before
# completing its build.
find_library( # Sets the name of the path variable.
        log-lib
        # Specifies the name of the NDK library that
        # you want CMake to locate.
        log)

# Specifies libraries CMake should link to your target library. You
# can link multiple libraries, such as libraries you define in this
# build script, prebuilt third-party libraries, or system libraries.
target_link_libraries( # Specifies the target library.
        native-lib
        # Links the target library to the log library
        # included in the NDK.
        ${log-lib}
        GLESv3)
Note: after modifying native source files, sync and rebuild, otherwise you will get "undefined" errors:

Undefined symbols in NDK development usually come from one of two causes: 1. an external library is not listed in CMake, so it must be added to target_link_libraries; 2. the project is out of sync, in which case making any trivial edit to CMakeLists and syncing again fixes it.

One remaining problem is that the shaders are written as inline strings, which is awkward to edit, so I wanted to load them from external files.
First of all, reading them through C++ fstream is basically a dead end: files are looked up relative to the directory of the built target, and since you have no idea where that ends up on the device, a wrong path simply means the file is never found.
Instead, the data is fetched through the AssetManager, and the files must be placed under the assets directory.

The rough idea is to obtain the Java AssetManager object in C++, then call AAssetManager_open and AAsset_read to read the file.
Let's look at the Java changes first. My approach is to save the AssetManager as a private field of MainActivity during initialization, to guard against a null reference, roughly as sketched below.

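A minimal sketch of that change, assuming the field is named mrg (the name the native code expects later); adjust to your own project:

// MainActivity.java -- also add: import android.content.res.AssetManager;
private AssetManager mrg;   // keep a reference so the native layer always has a valid AssetManager

@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    mrg = getAssets();                  // obtain the AssetManager from the Context
    gLView = new MyGLSurfaceView(this);
    setContentView(gLView);
}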
Next, add a method to NativeImpl that passes the AssetManager object down to native code:

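Judging from the JNI function Java_com_example_c_1ndktest_NativeImpl_InitScene in the final native-lib.cpp, the declaration looks roughly like this:

// NativeImpl.java -- also add: import android.content.res.AssetManager;
public static native void InitScene(AssetManager assetManager);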
This method is called in MyRenderer.onSurfaceCreated():

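A sketch of the call site, assuming the mrg field from MainActivity above; InitScene has to run before init() so that g_mrg is set before any file is loaded:

@Override
public void onSurfaceCreated(GL10 gl10, EGLConfig eglConfig) {
    NativeImpl.InitScene(mrg);  // hand the AssetManager to the native layer first
    NativeImpl.init();
}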
With that, the mrg object has been handed over to C++.
Now the C++ side. First define the native-layer entry point and store the mrg object in the global variable g_mrg.

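This is the InitScene entry point shown in the full native-lib.cpp at the end of the post; its core is a single call:

extern "C" JNIEXPORT void JNICALL
Java_com_example_c_1ndktest_NativeImpl_InitScene(JNIEnv *env, jclass clazz, jobject mrg) {
    g_mrg = AAssetManager_fromJava(env, mrg);   // convert the Java AssetManager into an AAssetManager*
}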
Under the util directory, create a new file ReadFileUtil.h containing LoadFileContent(), a function that fetches the data through the AssetManager and returns the address of the unsigned char* buffer.
#ifndef C_NDKTEST_READFILEUTIL_H
#define C_NDKTEST_READFILEUTIL_H

#include <android/asset_manager.h>
#include <android/asset_manager_jni.h>
#include "logUtil.h"

const unsigned char *LoadFileContent(const char *filepath);

AAssetManager *g_mrg;

const unsigned char *LoadFileContent(const char *filepath) {
    // read shader code from assets
    unsigned char *fileContent = nullptr;
    AAsset *asset = AAssetManager_open(g_mrg, filepath, AASSET_MODE_UNKNOWN);
    if (asset == nullptr) {
        // LOGE("LoadFileContent asset is null, load shader error ");
        LOGCATE("LoadFileContent asset is null, load shader error ");
        return nullptr;   // bail out instead of dereferencing a null asset
    }
    int filesSize_v = AAsset_getLength(asset);
    fileContent = new unsigned char[filesSize_v + 1];   // +1 for the terminating '\0'
    AAsset_read(asset, fileContent, filesSize_v);
    fileContent[filesSize_v] = '\0';
    AAsset_close(asset);
    return fileContent;
}

#endif //C_NDKTEST_READFILEUTIL_H
Then, in the init function, we simply use the function defined above to load and store the shader source strings, as sketched below.

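For the triangle demo this follows the same pattern as the texture demo's init shown later; a sketch, assuming the shader files are saved as vertex.vs and fragment.fs under assets/ (the names used later in this post):

// inside Java_com_example_c_1ndktest_NativeImpl_init(), before triangleDemo->Init()
triangleDemo->getShaderCode((char *) LoadFileContent("vertex.vs"), 0);   // flag 0: vertex shader
triangleDemo->getShaderCode((char *) LoadFileContent("fragment.fs"), 1); // flag 1: fragment shader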
The TriangleDemo class gains a new member function, getShaderCode, sketched below.

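Its definition would mirror the TextureDemo::getShaderCode shown later, together with two new members const char *vs and const char *fs:

// TriangleDemo.cpp -- store the shader source pointers; flag 0 = vertex, otherwise fragment
void TriangleDemo::getShaderCode(const char *cs, int flag) {
    if (flag == 0) {
        vs = cs;
    } else {
        fs = cs;
    }
}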
With that, the shader files are read in successfully ~
As usual, let's look at the new directory layout: a TextureDemo class is added, an image-reading function needs to go into ReadFileUtil.h, and native-lib.cpp is modified so that the Java side ends up calling the new methods.

#ifndef C_NDKTEST_TEXTUREDEMO_H
#define C_NDKTEST_TEXTUREDEMO_H

#include <GLES3/gl3.h>

class TextureDemo {
public:
    TextureDemo() {
        m_ProgramObj = 0;
        m_VertexShader = 0;
        m_FragmentShader = 0;
    };

    void Init();
    void draw();
    void OnSurfaceChanged(int width, int height);
    void getShaderCode(const char *cs, int flag);
    void getTexturedata(unsigned char *data, int width, int height);

    GLuint m_ProgramObj;
    GLuint m_VertexShader;
    GLuint m_FragmentShader;
    GLuint VBO, VAO, EBO;
    const char *vs;
    const char *fs;
    unsigned char *texturedata;
    int texturewidth, textureheight;
    unsigned int texture;
};

#endif //C_NDKTEST_TEXTUREDEMO_H
The rendering code changes in two main places: an EBO object is added, and a texture is drawn; also, because the quad is stitched together from two triangles, four vertices are needed.
We know that complex shapes are all assembled from triangles. The EBO tells the program which of the many vertices stored in the VBO are grouped into each triangle; put simply, it is a buffer of vertex indices. Note how it differs from the coordinate buffer (both appear in Init() below):

For drawing the texture, the main addition is the texture coordinate of each vertex (texture coords): for any fragment inside the primitive, its color is sampled at texture coordinates interpolated between the vertices. The setup is the same generate-and-bind routine, so I will not repeat it.
#include "../util/logUtil.h"
#include "../util/GLUtil.h"
#include "TextureDemo.h"
#include <GLES3/gl3.h>

void TextureDemo::draw() {
    if (m_ProgramObj == 0)
        return;
    glClearColor(0.5, 0.5, 1.0, 1.0);
    glClear(GL_STENCIL_BUFFER_BIT | GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glUseProgram(m_ProgramObj);
    glBindTexture(GL_TEXTURE_2D, texture);
    glBindVertexArray(VAO);
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
    glBindVertexArray(0);
}

void TextureDemo::Init() {
    if (m_ProgramObj != 0) return;
    LOGCATE("init TextureDemo");
    float vertices[] = {
            // positions        // colors         // texture coords
             1.0f,  1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f,  // top right
             1.0f, -1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f,  // bottom right
            -1.0f, -1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f,  // bottom left
            -1.0f,  1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f   // top left
    };
    // note: the indices must be unsigned int
    unsigned int indices[] = {
            0, 1, 3,
            1, 2, 3
    };
    glClearColor(0.5, 0.5, 1.0, 1.0);
    m_ProgramObj = GLUtil::CreateProgram(vs, fs, m_VertexShader, m_FragmentShader);
    glUseProgram(m_ProgramObj);

    glGenVertexArrays(1, &VAO);
    glGenBuffers(1, &VBO);
    glGenBuffers(1, &EBO);
    glBindVertexArray(VAO);
    glBindBuffer(GL_ARRAY_BUFFER, VBO);
    glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
    // note: the index buffer uses the GL_ELEMENT_ARRAY_BUFFER target
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);

    // transfer data to the shader program
    // position attribute
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void *) 0);
    glEnableVertexAttribArray(0);
    // color attribute
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void *) (3 * sizeof(float)));
    glEnableVertexAttribArray(1);
    // texcoord attribute
    glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void *) (6 * sizeof(float)));
    glEnableVertexAttribArray(2);

    // texture
    glGenTextures(1, &texture);
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, texturewidth, textureheight, 0, GL_RGB, GL_UNSIGNED_BYTE, texturedata);
    glGenerateMipmap(GL_TEXTURE_2D);
}

void TextureDemo::OnSurfaceChanged(int width, int height) {
    LOGCATE("MyGLRenderContext::OnSurfaceChanged [w, h] = [%d, %d]", width, height);
    glViewport(0, 0, width, height);
}

void TextureDemo::getShaderCode(const char *cs, int flag) {
    if (flag == 0) {
        vs = cs;
    } else {
        fs = cs;
    }
}

void TextureDemo::getTexturedata(unsigned char *data, int width, int height) {
    texturedata = data;
    texturewidth = width;
    textureheight = height;
}
It turns out that reading a BMP file needs no further decoding work, which is convenient, so I suggest converting textures to BMP.
Also, the first time I displayed an image the colors came out wrong (it looked black-and-white); changing the image width and height to multiples of 2 made the error go away.
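My guess is that this is the usual row-alignment pitfall rather than anything about the image itself: 24-bit BMP rows are padded to 4-byte multiples in the file, and glTexImage2D also assumes by default that each pixel row starts on a 4-byte boundary, so when width * 3 bytes is not a multiple of 4 the rows get misread. A sketch of the GL-side fix (an alternative I tried in my head, not what the demo code above does):

// tell GL that the RGB rows are tightly packed, then upload as before
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, texturewidth, textureheight, 0, GL_RGB, GL_UNSIGNED_BYTE, texturedata);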
#ifndef C_NDKTEST_READFILEUTIL_H
#define C_NDKTEST_READFILEUTIL_H

#include <android/asset_manager.h>
#include <android/asset_manager_jni.h>
#include "logUtil.h"
#define STB_IMAGE_IMPLEMENTATION

const unsigned char *LoadFileContent(const char *filepath);
unsigned char *DecodeBMP(unsigned char *bmpFileData, int &width, int &height);
unsigned char *ReadBMP(const char *bmpPath, int &width, int &height);

AAssetManager *g_mrg;

const unsigned char *LoadFileContent(const char *filepath) {
    // read shader code from assets
    unsigned char *fileContent = nullptr;
    AAsset *asset = AAssetManager_open(g_mrg, filepath, AASSET_MODE_UNKNOWN);
    if (asset == nullptr) {
        // LOGE("LoadFileContent asset is null, load shader error ");
        LOGCATE("LoadFileContent asset is null, load shader error ");
        return nullptr;   // bail out instead of dereferencing a null asset
    }
    int filesSize_v = AAsset_getLength(asset);
    fileContent = new unsigned char[filesSize_v + 1];   // +1 for the terminating '\0'
    AAsset_read(asset, fileContent, filesSize_v);
    fileContent[filesSize_v] = '\0';
    AAsset_close(asset);
    return fileContent;
}

unsigned char *DecodeBMP(unsigned char *bmpFileData, int &width, int &height) {
    if (0x4D42 == *((unsigned short *) bmpFileData)) { // check the "BM" signature in the file header
        // read the info header
        int pixelDataOffset = *((int *) (bmpFileData + 10)); // offset of the pixel data within the buffer
        width = *((int *) (bmpFileData + 18));
        height = *((int *) (bmpFileData + 22));
        unsigned char *pixelData = bmpFileData + pixelDataOffset;
        // BMP pixel data is stored as BGR, so swap the R and B channels
        for (int i = 0; i < width * height * 3; i += 3) {
            unsigned char temp = pixelData[i];
            pixelData[i] = pixelData[i + 2];
            pixelData[i + 2] = temp;
        }
        LOGCATE("DecodeBMP success ");
        return pixelData;
    }
    LOGCATE("DecodeBMP error ");
    return nullptr;
}

unsigned char *ReadBMP(const char *bmpPath, int &width, int &height) {
    // int nFileSize = 0;
    unsigned char *bmpFileContent = const_cast<unsigned char *>(LoadFileContent(bmpPath));
    if (bmpFileContent == NULL) {
        return 0;
    }
    // int bmpWidth = 0, bmpHeight = 0;
    unsigned char *pixelData = DecodeBMP(bmpFileContent, width, height);
    if (pixelData == NULL) {
        delete[] bmpFileContent;
        LOGCATE("CreateTextureFromBMP error ");
        return 0;
    }
    // GLuint texture = CreateTexture2D(pixelData, bmpWidth, bmpHeight, GL_RGB);
    // delete[] bmpFileContent;
    // LOGCATE("CreateTextureFromBMP success ");
    // return texture;
    return pixelData;   // return the pixel data, not the raw file buffer (which still starts with the headers)
}

#endif //C_NDKTEST_READFILEUTIL_H
Add the calls that read the image:
#include <jni.h>
#include <string>
#include "demo/BgDemo.h"
#include "demo/TextureDemo.h"
#include "logUtil.h"
#include "ReadFileUtil.h"

//BgDemo *triangleDemo = NULL;
TextureDemo *triangleDemo = NULL;

extern "C" JNIEXPORT jstring JNICALL
Java_com_example_c_1ndktest_MainActivity_stringFromJNI(
        JNIEnv *env,
        jobject clazz) {
    std::string hello = "Hello NativeImpl from C++";
    return env->NewStringUTF(hello.c_str());
}

extern "C" JNIEXPORT void JNICALL
Java_com_example_c_1ndktest_NativeImpl_init(JNIEnv *env, jclass instance) {
    if (triangleDemo == NULL) {
        // triangleDemo = new BgDemo();
        triangleDemo = new TextureDemo();
    }
    // load shader
    triangleDemo->getShaderCode((char *) LoadFileContent("vertex.vs"), 0);
    triangleDemo->getShaderCode((char *) LoadFileContent("fragment.fs"), 1);
    int width = 0, height = 0;
    unsigned char *img = ReadBMP("f.bmp", width, height);
    triangleDemo->getTexturedata(img, width, height);
    triangleDemo->Init();
}

extern "C" JNIEXPORT void JNICALL
Java_com_example_c_1ndktest_NativeImpl_OnSurfaceChanged(JNIEnv *env, jclass instance, jint width, jint height) {
    triangleDemo->OnSurfaceChanged(width, height);
}

extern "C" JNIEXPORT void JNICALL
Java_com_example_c_1ndktest_NativeImpl_draw(JNIEnv *env, jclass instance) {
    triangleDemo->draw();
}

extern "C"
JNIEXPORT jstring JNICALL
Java_com_example_c_1ndktest_NativeImpl_stringFromJNI(JNIEnv *env, jclass clazz) {
    std::string hello = "Hello NativeImpl from C++";
    return env->NewStringUTF(hello.c_str());
}

extern "C"
JNIEXPORT void JNICALL
Java_com_example_c_1ndktest_NativeImpl_InitScene(JNIEnv *env, jclass clazz, jobject mrg) {
    g_mrg = AAssetManager_fromJava(env, mrg);
}

And that wraps up the introductory tutorial ~ There are still plenty of open issues, such as coordinate transforms, the image disappearing after rotating the screen, learning compute shaders and so on; those will have to wait for the next post ~