After three days of fiddling around, I finally got this working.
Environment: Ubuntu Kylin 16.04
Hadoop 2.7.1
I used the pre-configured VM image from 林子雨 (Prof. Lin Ziyu).
1:
In the Hadoop installation directory, find hdfs.h (here it lives under /usr/local/hadoop/include) and add #include "hdfs.h" to your source file.
2:
Add a bunch of things to CMake. This is what I ended up with; the key parts are the shared libraries libhdfs.so and libjvm.so, plus the truckload of jar files the JVM needs (those are handled via CLASSPATH in step 3).
cmake_minimum_required(VERSION 3.23)
project(clion)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_FLAGS -pthread)
#set(ENV{JAVA_HOME} /usr/lib/jvm/default-java)
#set(ENV{HADOOP_HOME} /usr/local/hadoop)
#set(ENV{CLASSPATH} /usr/local/hadoop/bin/hadoop classpath --glob)
message(STATUS "CMAKE_CXX_FLAGS = ${CMAKE_CXX_FLAGS}")
find_package(JNI REQUIRED)
include_directories(
${JNI_INCLUDE_DIRS}
${CMAKE_SOURCE_DIR}/include
)
include_directories(/usr/local/hadoop/include)
include_directories(/usr/lib/jvm/default-java/include/linux)
include_directories(/usr/lib/jvm/default-java/include)
LINK_DIRECTORIES(/usr/lib/jvm/default-java/jre/lib/amd64/server)
LINK_DIRECTORIES(/usr/local/hadoop/lib/native)
add_executable(clion main.cpp)
target_link_libraries(clion libhdfs.so)
target_link_libraries(clion libjvm.so)
I suspect quite a bit of this is unnecessary (but since it runs, I'm not deleting anything).
3:
Run sudo -s, then vim /etc/profile, and append the following at the end of /etc/profile:
export JAVA_HOME=/usr/lib/jvm/default-java
export HADOOP_HOME=/usr/local/hadoop
You also need to put Hadoop's jar files into /etc/profile (i.e. add them all to CLASSPATH).
In a new terminal, run:
find /usr/local/hadoop/ -name "*.jar" | awk '{ printf("export CLASSPATH=%s:$CLASSPATH\n", $0); }'
(Original source of this workaround: https://blog.csdn.net/weixin_33775572/article/details/93954421)
Then copy all of the output into /etc/profile.
When you run things afterwards, it will complain that three jars are duplicates (the names contain SLF4J, I think); just delete two of the three offending export lines.
Changes to /etc/profile apparently need a reboot to take effect (logging out and back in, or running source /etc/profile in each shell, should also do it).
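One thing worth knowing here: libhdfs reads the CLASSPATH of the running process when it starts the JVM, so if CLion was launched before /etc/profile was reloaded, the variables may simply not be there. The following is my own little sketch (not from any of the sources above) that just prints the relevant environment variables from inside the program, so you can tell whether the process actually sees them:

// My own sketch: print the environment variables libhdfs depends on.
// If CLASSPATH comes back "(not set)" here, hdfsConnect will fail even
// though /etc/profile looks correct in a terminal.
#include <cstdio>
#include <cstdlib>

int main() {
    const char* vars[] = {"JAVA_HOME", "HADOOP_HOME", "CLASSPATH"};
    for (const char* v : vars) {
        const char* val = std::getenv(v);
        std::printf("%s = %s\n", v, val ? val : "(not set)");
    }
    return 0;
}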
Also, to operate on HDFS from C you need to take HDFS out of safe mode first.
In a terminal (from the Hadoop directory), run bin/hadoop dfsadmin -safemode leave (bin/hdfs dfsadmin -safemode leave is the non-deprecated form in Hadoop 2.x).
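Before running the full read/write test below, I find a bare connect-and-disconnect check useful for separating environment problems from code problems. This is my own minimal sketch (not part of the original post); it uses the same 127.0.0.1:9000 NameNode address as the test code below:

// Minimal smoke test: just connect to HDFS and disconnect.
// If this prints "connected", the header, libraries and CLASSPATH
// from steps 1-3 are all working.
#include <cstdio>
#include "hdfs.h"

int main() {
    hdfsFS fs = hdfsConnect("127.0.0.1", 9000);
    if (!fs) {
        std::fprintf(stderr, "hdfsConnect failed -- check CLASSPATH and that HDFS is running\n");
        return 1;
    }
    std::printf("connected\n");
    hdfsDisconnect(fs);
    return 0;
}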
Then test with the code below. It creates a file testfile2.txt on HDFS, writes "Hello, World!2333" into it, then reads the file back and prints it.
When you run it, you may get a warning:
WARN [main] util.NativeCodeLoader (NativeCodeLoader.java:<clinit>(62)) - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Check in the terminal whether it actually worked; if it did, just ignore the warning.
If it didn't work, the problem may be with the Hadoop native libraries; have a look at https://blog.csdn.net/solike8/article/details/88424001#
That's about all the pitfalls.
Source of the code: https://www.cnblogs.com/caoyingjie/p/3794250.html#
#include <iostream>
#include <cstdio>
#include <cstring>
#include <cstdlib>
#include <fcntl.h>   // O_WRONLY, O_CREAT, O_RDONLY
#include "hdfs.h"

using std::cout;
using std::endl;

// Minimal class declaration so this snippet compiles on its own
// (the original post only shows the member function).
class HdfsOperator {
public:
    int test();
};

int HdfsOperator::test() {
    cout << "done" << endl;

    // Connect to HDFS (NameNode at 127.0.0.1:9000)
    hdfsFS fs = hdfsConnect("127.0.0.1", 9000);
    if (!fs) {
        fprintf(stderr, "Failed to connect to hdfs.\n");
        return 1;
    }

    // Write: create /user/hadoop/testfile2.txt and put the test string into it
    const char* writePath = "/user/hadoop/testfile2.txt";
    hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY | O_CREAT, 0, 0, 0);
    if (!writeFile) {
        fprintf(stderr, "Failed to open %s for writing!\n", writePath);
        return 1;
    }
    const char* buffer = "Hello, World!2333";
    // strlen(buffer)+1 also writes the trailing '\0' (kept from the original code)
    tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer) + 1);
    if (hdfsFlush(fs, writeFile)) {
        fprintf(stderr, "Failed to 'flush' %s\n", writePath);
        return 1;
    }
    hdfsCloseFile(fs, writeFile);
    // Read: open the file again and print its contents in bufferSize-byte chunks
    unsigned bufferSize = 1024;
    const char* readPath = "/user/hadoop/testfile2.txt";
    hdfsFile readFile = hdfsOpenFile(fs, readPath, O_RDONLY, bufferSize, 0, 0);
    if (!readFile) {
        fprintf(stderr, "couldn't open file %s for reading\n", readPath);
        return -2;
    }
    char* rbuffer = (char*)malloc(sizeof(char) * (bufferSize + 1));
    if (rbuffer == NULL) {
        return -2;
    }
    // Keep reading full buffers until a short (or zero-length) read signals EOF
    tSize curSize = bufferSize;
    for (; curSize == bufferSize;) {
        curSize = hdfsRead(fs, readFile, (void*)rbuffer, bufferSize);
        if (curSize < 0) {   // read error
            fprintf(stderr, "hdfsRead failed on %s\n", readPath);
            break;
        }
        rbuffer[curSize] = '\0';
        fprintf(stdout, "read '%s' from file!\n", rbuffer);
    }
    free(rbuffer);
    hdfsCloseFile(fs, readFile);
    hdfsDisconnect(fs);
    cout << "done" << endl;
    return 0;
}

// Simple driver so this file builds as the main.cpp referenced in the CMakeLists above
int main() {
    HdfsOperator op;
    return op.test();
}
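If you want to go one step further than the test above, hdfs.h also declares calls for inspecting and deleting files (hdfsExists, hdfsGetPathInfo, hdfsFreeFileInfo, hdfsDelete). Here is a small follow-up sketch of my own (not from the original source) that checks testfile2.txt is really there, prints its size, and then deletes it:

// My own follow-up sketch: verify the test file exists, print its size, delete it.
#include <cstdio>
#include "hdfs.h"

int main() {
    hdfsFS fs = hdfsConnect("127.0.0.1", 9000);
    if (!fs) {
        std::fprintf(stderr, "connect failed\n");
        return 1;
    }
    const char* path = "/user/hadoop/testfile2.txt";
    if (hdfsExists(fs, path) == 0) {                 // 0 means the path exists
        hdfsFileInfo* info = hdfsGetPathInfo(fs, path);
        if (info) {
            std::printf("%s is %lld bytes\n", path, (long long)info->mSize);
            hdfsFreeFileInfo(info, 1);
        }
        if (hdfsDelete(fs, path, 0) == 0) {          // third argument: 0 = non-recursive
            std::printf("deleted %s\n", path);
        }
    } else {
        std::printf("%s does not exist\n", path);
    }
    hdfsDisconnect(fs);
    return 0;
}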