• 基于K8S部署filebeat及logstash并输出到java程序中


    需求来源:

            采集K8S集群的容器日志,并集中存储。

    解决方案:

            1、DaemonSet 

                    以守护进程的方式运行Filebeat,Filebeat将采集的日志通过logstash发送到JAVA程序中,再由JAVA程序处理后,集中存储起来。

            2、Sidecar 

                    每个POD中额外增加一个Filebeat容器,Filebeat通过文件共享方式,读取相应的日志并通过logstash发送到JAVA程序中。

    两个方式可以并存,并不冲突。DaemonSet方式采集容器的标准输出,如果有特殊需求,再通过Sidecar方式定制采集日志即可。

    下面介绍的是DaemonSet方式采集容器日志的内容:

    先将K8S部署的yaml文件贴出来:

    1. # 创建账户
    2. apiVersion: v1
    3. kind: ServiceAccount
    4. metadata:
    5. labels:
    6. k8s-app: itsm-node-manager
    7. name: itsm-node-manager
    8. namespace: kube-system
    9. ---
    10. # 创建角色
    11. apiVersion: rbac.authorization.k8s.io/v1
    12. kind: ClusterRole
    13. metadata:
    14. labels:
    15. k8s-app: itsm-node-manager
    16. name: itsm-node-manager-role
    17. namespace: kube-system
    18. rules:
    19. - apiGroups:
    20. - ""
    21. resources:
    22. - nodes
    23. - namespaces
    24. - events
    25. - pods
    26. verbs:
    27. - get
    28. - list
    29. - watch
    30. ---
    31. # 账户与角色绑定
    32. apiVersion: rbac.authorization.k8s.io/v1
    33. kind: ClusterRoleBinding
    34. metadata:
    35. name: itsm-node-manager-role-binding
    36. namespace: kube-system
    37. roleRef:
    38. apiGroup: rbac.authorization.k8s.io
    39. kind: ClusterRole
    40. name: itsm-node-manager-role
    41. subjects:
    42. - kind: ServiceAccount
    43. name: itsm-node-manager
    44. namespace: kube-system
    45. ---
    46. # 创建logstash配置文件
    47. apiVersion: v1
    48. kind: ConfigMap
    49. metadata:
    50. labels:
    51. k8s-app: itsm-node-manager
    52. name: logstash-config
    53. namespace: kube-system
    54. data:
    55. logstash.yml: 'config.reload.automatic: true'
    56. pipeline.conf: |-
    57. input {
    58. beats {
    59. port => 5044
    60. codec => json
    61. }
    62. }
    63. filter {
    64. }
    65. output {
    66. http {
    67. http_method => "post"
    68. format => "json"
    69. # 此处配置程序的url路径,java代码会在下面贴出来。如果调用的是集群内部的程序,可以采用和filebeat一样的域名方式
    70. url => "http://192.168.0.195:8080/containerLog/insert"
    71. content_type => "application/json"
    72. }
    73. }
    74. ---
    75. # 创建logstash
    76. apiVersion: apps/v1
    77. kind: Deployment
    78. metadata:
    79. name: logstash
    80. namespace: kube-system
    81. labels:
    82. server: logstash-7.10.1
    83. spec:
    84. selector:
    85. matchLabels:
    86. k8s-app: logstash
    87. template:
    88. metadata:
    89. creationTimestamp: null
    90. labels:
    91. k8s-app: logstash
    92. name: logstash
    93. spec:
    94. containers:
    95. - image: elastic/logstash:7.10.1
    96. imagePullPolicy: IfNotPresent
    97. name: logstash
    98. securityContext:
    99. procMount: Default
    100. runAsUser: 0
    101. volumeMounts:
    102. - mountPath: /usr/share/logstash/config/logstash.yml
    103. name: logstash-config
    104. readOnly: true
    105. subPath: logstash.yml
    106. - mountPath: /usr/share/logstash/pipeline/logstash.conf
    107. name: logstash-config
    108. readOnly: true
    109. subPath: pipeline.conf
    110. dnsPolicy: ClusterFirst
    111. restartPolicy: Always
    112. schedulerName: default-scheduler
    113. securityContext: {}
    114. terminationGracePeriodSeconds: 120
    115. imagePullSecrets:
    116. - name: dockerpull
    117. volumes:
    118. - configMap:
    119. defaultMode: 420
    120. name: logstash-config
    121. name: logstash-config
    122. ---
    123. # 创建logstash service
    124. apiVersion: v1
    125. kind: Service
    126. metadata:
    127. labels:
    128. k8s-app: logstash
    129. name: logstash
    130. namespace: kube-system
    131. spec:
    132. type: ClusterIP
    133. selector:
    134. k8s-app: logstash
    135. ports:
    136. - port: 5044
    137. protocol: TCP
    138. targetPort: 5044
    139. ---
    140. # 创建filebeat配置文件
    141. apiVersion: v1
    142. kind: ConfigMap
    143. metadata:
    144. labels:
    145. k8s-app: itsm-node-manager
    146. name: filebeat-config
    147. namespace: kube-system
    148. data:
    149. filebeat.yml: |-
    150. filebeat.autodiscover:
    151. providers:
    152. - type: kubernetes
    153. host: ${NODE_NAME}
    154. hints.enabled: true
    155. hints.default_config:
    156. type: container
    157. paths:
    158. - /var/log/containers/*${data.kubernetes.container.id}.log
    159. processors:
    160. - add_cloud_metadata:
    161. - add_host_metadata:
    162. output.logstash:
    163. hosts: ["logstash.kube-system.svc.cluster.local:5044"] # kubectl -n logs get svc
    164. enabled: true
    165. ---
    166. # 创建filebeat守护进程
    167. apiVersion: apps/v1
    168. kind: DaemonSet
    169. metadata:
    170. name: filebeat
    171. namespace: kube-system
    172. labels:
    173. server: filebeat-7.10.1
    174. spec:
    175. selector:
    176. matchLabels:
    177. name: filebeat
    178. kubernetes.io/cluster-service: "true"
    179. template:
    180. metadata:
    181. creationTimestamp: null
    182. labels:
    183. name: filebeat
    184. kubernetes.io/cluster-service: "true"
    185. spec:
    186. containers:
    187. - args:
    188. - -c
    189. - /etc/filebeat.yml
    190. - -e
    191. env:
    192. - name: NODE_NAME
    193. valueFrom:
    194. fieldRef:
    195. apiVersion: v1
    196. fieldPath: spec.nodeName
    197. image: elastic/filebeat:7.10.1
    198. imagePullPolicy: IfNotPresent
    199. name: filebeat
    200. resources:
    201. limits:
    202. memory: 200Mi
    203. requests:
    204. cpu: 100m
    205. memory: 100Mi
    206. securityContext:
    207. procMount: Default
    208. runAsUser: 0
    209. volumeMounts:
    210. - mountPath: /etc/filebeat.yml
    211. name: config
    212. readOnly: true
    213. subPath: filebeat.yml
    214. - mountPath: /usr/share/filebeat/data
    215. name: data
    216. - mountPath: /var/lib/docker/containers
    217. name: varlibdockercontainers
    218. readOnly: true
    219. - mountPath: /var/log
    220. name: varlog
    221. readOnly: true
    222. restartPolicy: Always
    223. serviceAccount: itsm-node-manager
    224. serviceAccountName: itsm-node-manager
    225. volumes:
    226. - configMap:
    227. defaultMode: 384
    228. name: filebeat-config
    229. name: config
    230. - hostPath:
    231. path: /var/lib/docker/containers
    232. type: ""
    233. name: varlibdockercontainers
    234. - hostPath:
    235. path: /var/log
    236. type: ""
    237. name: varlog
    238. - hostPath:
    239. path: /opt/filebeat/data
    240. type: DirectoryOrCreate
    241. name: data

    这是将多个部署信息放在了一个yaml文件中,用“---”分隔开来。

    以下是JAVA代码片段:

    1. @Api(tags = "服务日志控制类")
    2. @Slf4j
    3. @RestController
    4. @RequestMapping("/containerLog")
    5. public class ContainerLogController {
    6. @Autowired
    7. private ContainerLogService containerLogService;
    8. @ApiOperation(value = "容器日志写入接口",produces = "application/json", response = String.class)
    9. @PostMapping("insert")
    10. public Result insert(HttpServletRequest httpServletRequest){
    11. BufferedReader br = null;
    12. StringBuilder sb = new StringBuilder("");
    13. try {
    14. br = httpServletRequest.getReader();
    15. String str;
    16. while ((str=br.readLine())!=null){
    17. sb.append(str);
    18. }
    19. containerLogService.insert(sb.toString());
    20. } catch (IOException e) {
    21. e.printStackTrace();
    22. }
    23. return Result.newSuccess();
    24. }
    25. }

    至此,便可获得logstash发送过来的日志信息了,容器日志均是Json格式。

    其中有三个地方可以根据需求进行扩展:

    1、filebeat采集规则

    2、logstash过滤规则

    3、程序处理逻辑

  • 相关阅读:
    陈宇(Aqua)-安全->云安全->多云安全
    Javascript知识【轮播图和定时弹广告案例&JS样式修改】
    【CodeForces】CF1700D River Locks
    我封装的一个REPR轮子 Biwen.QuickApi
    前端算法:链表,逆置,递归的写法
    认识微服务 SpringCloud (史上最全学习路线)
    Linux | 二级页表的虚拟地址是怎么转换的?
    hadoop集群搭建
    Unity记录5.5-地图-随机生成连续洞穴块
    扫雷游戏源码解析:构建你自己的MineSweeper
  • 原文地址:https://blog.csdn.net/zhanwentao2/article/details/128032736