Compare commits

...

4 Commits

Author SHA1 Message Date
tomsun28
e4e9b0d187 [home]适配支持Linux操作系统监控帮助文档 2022-03-12 14:06:13 +08:00
tomsun28
ffa3194113 [web-app]前端放开操作系统 2022-03-12 10:57:25 +08:00
tomsun28
f52247df67 [collector,manager]linux监控类型新增内存,磁盘,网络监控指标 2022-03-12 10:51:22 +08:00
tomsun28
25d692cf37 [collector,manager]支持Linux操作系统监控类型 2022-03-11 23:57:55 +08:00
16 changed files with 589 additions and 5 deletions

View File

@@ -97,6 +97,12 @@
<artifactId>postgresql</artifactId> <artifactId>postgresql</artifactId>
<version>42.3.3</version> <version>42.3.3</version>
</dependency> </dependency>
<!-- linux ssh -->
<dependency>
<groupId>org.apache.sshd</groupId>
<artifactId>sshd-core</artifactId>
<version>2.8.0</version>
</dependency>
</dependencies> </dependencies>
</project> </project>

View File

@@ -21,9 +21,9 @@ import java.util.concurrent.TimeUnit;
public class CommonCache { public class CommonCache {
/** /**
* 默认缓存时间 30minute * 默认缓存时间 10minute
*/ */
private static final long DEFAULT_CACHE_TIMEOUT = 30 * 60 * 1000L; private static final long DEFAULT_CACHE_TIMEOUT = 10 * 60 * 1000L;
/** /**
* 默认最大缓存数量 * 默认最大缓存数量
@@ -155,6 +155,15 @@ public class CommonCache {
}); });
} }
/**
* 新增或更新cache
* @param key 存储对象key
* @param value 存储对象
*/
public void addCache(Object key, Object value) {
addCache(key, value, DEFAULT_CACHE_TIMEOUT);
}
/** /**
* 根据缓存key获取缓存对象 * 根据缓存key获取缓存对象
* @param key key * @param key key

View File

@@ -0,0 +1,25 @@
package com.usthe.collector.collect.common.ssh;
import lombok.extern.slf4j.Slf4j;
import org.apache.sshd.client.SshClient;
/**
 * Shared ssh client holder: one process-wide {@link SshClient},
 * created and started eagerly at class-load time and reused by all collectors.
 * @author tom
 * @date 2022/3/11 15:58
 */
@Slf4j
public class CommonSshClient {

    /** The single shared client instance, started once in the static initializer. */
    private static final SshClient SSH_CLIENT;

    static {
        SSH_CLIENT = SshClient.setUpDefaultClient();
        SSH_CLIENT.start();
    }

    /**
     * @return the shared, already-started ssh client
     */
    public static SshClient getSshClient() {
        return SSH_CLIENT;
    }
}

View File

@@ -0,0 +1,202 @@
package com.usthe.collector.collect.ssh;
import com.usthe.collector.collect.AbstractCollect;
import com.usthe.collector.collect.common.cache.CacheIdentifier;
import com.usthe.collector.collect.common.cache.CommonCache;
import com.usthe.collector.collect.common.ssh.CommonSshClient;
import com.usthe.collector.util.CollectorConstants;
import com.usthe.common.entity.job.Metrics;
import com.usthe.common.entity.job.protocol.SshProtocol;
import com.usthe.common.entity.message.CollectRep;
import com.usthe.common.util.CommonConstants;
import lombok.extern.slf4j.Slf4j;
import org.apache.sshd.client.SshClient;
import org.apache.sshd.client.channel.ClientChannel;
import org.apache.sshd.client.channel.ClientChannelEvent;
import org.apache.sshd.client.session.ClientSession;
import org.springframework.util.StringUtils;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.net.ConnectException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
/**
 * ssh protocol collection implementation: runs a shell script on the remote
 * host over ssh and parses the command output into metric rows.
 * @author tom
 * @date 2022/03/11 15:10
 */
@Slf4j
public class SshCollectImpl extends AbstractCollect {

    private static final String PARSE_TYPE_ONE_ROW = "oneRow";
    private static final String PARSE_TYPE_MULTI_ROW = "multiRow";

    /** Default collect timeout in milliseconds. */
    private static final int DEFAULT_TIMEOUT = 3000;

    private SshCollectImpl(){}

    public static SshCollectImpl getInstance() {
        return SshCollectImpl.Singleton.INSTANCE;
    }

    @Override
    public void collect(CollectRep.MetricsData.Builder builder, long appId, String app, Metrics metrics) {
        long startTime = System.currentTimeMillis();
        // validate that the required ssh params are present
        try {
            validateParams(metrics);
        } catch (Exception e) {
            builder.setCode(CollectRep.Code.FAIL);
            builder.setMsg(e.getMessage());
            return;
        }
        SshProtocol sshProtocol = metrics.getSsh();
        // timeout defaults to 3000 milliseconds (fixed stale comment: was documented as 300ms)
        int timeout = DEFAULT_TIMEOUT;
        try {
            timeout = Integer.parseInt(sshProtocol.getTimeout());
        } catch (Exception e) {
            log.warn(e.getMessage());
        }
        try {
            ClientSession clientSession = getConnectSession(sshProtocol, timeout);
            ClientChannel channel = clientSession.createExecChannel(sshProtocol.getScript());
            ByteArrayOutputStream response = new ByteArrayOutputStream();
            channel.setOut(response);
            if (!channel.open().verify(timeout).isOpened()) {
                throw new Exception("open failed");
            }
            // wait (bounded by timeout) for the remote command to finish
            List<ClientChannelEvent> list = new ArrayList<>();
            list.add(ClientChannelEvent.CLOSED);
            channel.waitFor(list, timeout);
            Long responseTime = System.currentTimeMillis() - startTime;
            channel.close();
            String result = response.toString();
            if (!StringUtils.hasText(result)) {
                builder.setCode(CollectRep.Code.FAIL);
                builder.setMsg("采集数据失败");
                // bugfix: stop here - previously fell through and parsed the empty response
                return;
            }
            switch (sshProtocol.getParseType()) {
                case PARSE_TYPE_ONE_ROW:
                    parseResponseDataByOne(result, metrics.getAliasFields(), builder, responseTime);
                    break;
                default:
                    parseResponseDataByMulti(result, metrics.getAliasFields(), builder, responseTime);
                    break;
            }
        } catch (ConnectException connectException) {
            log.debug(connectException.getMessage());
            builder.setCode(CollectRep.Code.UN_CONNECTABLE);
            builder.setMsg("对端拒绝连接:服务未启动端口监听或防火墙");
        } catch (IOException ioException) {
            log.debug(ioException.getMessage());
            builder.setCode(CollectRep.Code.UN_CONNECTABLE);
            builder.setMsg("对端连接失败 " + ioException.getMessage());
        } catch (Exception exception) {
            log.debug(exception.getMessage());
            builder.setCode(CollectRep.Code.FAIL);
            builder.setMsg(exception.getMessage());
        }
    }

    /**
     * Parse "oneRow" output: each output line maps to one alias field, in order,
     * producing a single value row. responseTime is injected, not read from output.
     * @param result       raw command output
     * @param aliasFields  expected fields, in order
     * @param builder      metrics data builder receiving the row
     * @param responseTime collect round-trip time in ms
     */
    private void parseResponseDataByOne(String result, List<String> aliasFields, CollectRep.MetricsData.Builder builder, Long responseTime) {
        String[] lines = result.split("\n");
        // +1 because the responseTime field is synthesized, not taken from a line
        if (lines.length + 1 < aliasFields.size()) {
            log.error("ssh response data not enough: {}", result);
        }
        CollectRep.ValueRow.Builder valueRowBuilder = CollectRep.ValueRow.newBuilder();
        int aliasIndex = 0;
        int lineIndex = 0;
        while (aliasIndex < aliasFields.size()) {
            if (CollectorConstants.RESPONSE_TIME.equalsIgnoreCase(aliasFields.get(aliasIndex))) {
                valueRowBuilder.addColumns(responseTime.toString());
            } else if (lineIndex < lines.length) {
                valueRowBuilder.addColumns(lines[lineIndex].trim());
                lineIndex++;
            } else {
                // bugfix: avoid ArrayIndexOutOfBoundsException when the response
                // has fewer lines than alias fields - pad with the null marker
                valueRowBuilder.addColumns(CommonConstants.NULL_VALUE);
            }
            aliasIndex++;
        }
        builder.addValues(valueRowBuilder.build());
    }

    /**
     * Parse "multiRow" output: first line is a space-separated header, every
     * following line is one value row; alias fields are matched to header
     * columns case-insensitively, missing columns become the null marker.
     * @param result       raw command output
     * @param aliasFields  fields to extract per row
     * @param builder      metrics data builder receiving the rows
     * @param responseTime collect round-trip time in ms
     */
    private void parseResponseDataByMulti(String result, List<String> aliasFields,
                                          CollectRep.MetricsData.Builder builder, Long responseTime) {
        String[] lines = result.split("\n");
        if (lines.length <= 1) {
            log.error("ssh response data only has header: {}", result);
        }
        String[] fields = lines[0].split(" ");
        Map<String, Integer> fieldMapping = new HashMap<>(fields.length);
        for (int i = 0; i < fields.length; i++) {
            fieldMapping.put(fields[i].trim().toLowerCase(), i);
        }
        for (int i = 1; i < lines.length; i++) {
            String[] values = lines[i].split(" ");
            CollectRep.ValueRow.Builder valueRowBuilder = CollectRep.ValueRow.newBuilder();
            for (String alias : aliasFields) {
                if (CollectorConstants.RESPONSE_TIME.equalsIgnoreCase(alias)) {
                    valueRowBuilder.addColumns(responseTime.toString());
                } else {
                    Integer index = fieldMapping.get(alias.toLowerCase());
                    if (index != null && index < values.length) {
                        valueRowBuilder.addColumns(values[index]);
                    } else {
                        valueRowBuilder.addColumns(CommonConstants.NULL_VALUE);
                    }
                }
            }
            builder.addValues(valueRowBuilder.build());
        }
    }

    /**
     * Get an authenticated ssh session, reusing a cached one keyed by
     * host/port/username/password when it is still open; otherwise connect,
     * authenticate with the password (if any) and cache the new session.
     * @param sshProtocol ssh connection parameters
     * @param timeout     connect/auth timeout in milliseconds
     * @return an open, authenticated client session
     * @throws IOException on connection failure
     */
    private ClientSession getConnectSession(SshProtocol sshProtocol, int timeout) throws IOException {
        CacheIdentifier identifier = CacheIdentifier.builder()
                .ip(sshProtocol.getHost()).port(sshProtocol.getPort())
                .username(sshProtocol.getUsername()).password(sshProtocol.getPassword())
                .build();
        Optional<Object> cacheOption = CommonCache.getInstance().getCache(identifier, true);
        ClientSession clientSession = null;
        if (cacheOption.isPresent()) {
            clientSession = (ClientSession) cacheOption.get();
            try {
                // evict sessions that were closed remotely or are shutting down
                if (clientSession.isClosed() || clientSession.isClosing()) {
                    clientSession = null;
                    CommonCache.getInstance().removeCache(identifier);
                }
            } catch (Exception e) {
                log.warn(e.getMessage());
                clientSession = null;
                CommonCache.getInstance().removeCache(identifier);
            }
        }
        if (clientSession != null) {
            return clientSession;
        }
        SshClient sshClient = CommonSshClient.getSshClient();
        clientSession = sshClient.connect(sshProtocol.getUsername(), sshProtocol.getHost(), Integer.parseInt(sshProtocol.getPort()))
                .verify(timeout, TimeUnit.MILLISECONDS).getSession();
        if (StringUtils.hasText(sshProtocol.getPassword())) {
            clientSession.addPasswordIdentity(sshProtocol.getPassword());
        }
        // authenticate
        if (!clientSession.auth().verify(timeout, TimeUnit.MILLISECONDS).isSuccess()) {
            throw new IllegalArgumentException("认证失败");
        }
        CommonCache.getInstance().addCache(identifier, clientSession);
        return clientSession;
    }

    /**
     * Validate that the metrics carry the ssh protocol configuration.
     * @throws Exception when ssh params are missing
     */
    private void validateParams(Metrics metrics) throws Exception {
        if (metrics == null || metrics.getSsh() == null) {
            throw new Exception("Ssh collect must has ssh params");
        }
    }

    /** Lazy-init singleton holder. */
    private static class Singleton {
        private static final SshCollectImpl INSTANCE = new SshCollectImpl();
    }
}

View File

@@ -13,7 +13,7 @@ import java.io.IOException;
import java.net.ConnectException; import java.net.ConnectException;
/** /**
* icmp协议采集实现 - ping * telnet协议采集实现
* @author tom * @author tom
* @date 2021/12/4 12:32 * @date 2021/12/4 12:32
*/ */

View File

@@ -24,6 +24,10 @@ public interface DispatchConstants {
* 协议 jdbc * 协议 jdbc
*/ */
String PROTOCOL_JDBC = "jdbc"; String PROTOCOL_JDBC = "jdbc";
/**
* 协议 ssh
*/
String PROTOCOL_SSH = "ssh";
// 协议类型相关 - end // // 协议类型相关 - end //
// http协议相关 - start 需尽可能先复用 HttpHeaders // // http协议相关 - start 需尽可能先复用 HttpHeaders //

View File

@@ -6,6 +6,7 @@ import com.usthe.collector.collect.AbstractCollect;
import com.usthe.collector.collect.database.JdbcCommonCollect; import com.usthe.collector.collect.database.JdbcCommonCollect;
import com.usthe.collector.collect.http.HttpCollectImpl; import com.usthe.collector.collect.http.HttpCollectImpl;
import com.usthe.collector.collect.icmp.IcmpCollectImpl; import com.usthe.collector.collect.icmp.IcmpCollectImpl;
import com.usthe.collector.collect.ssh.SshCollectImpl;
import com.usthe.collector.collect.telnet.TelnetCollectImpl; import com.usthe.collector.collect.telnet.TelnetCollectImpl;
import com.usthe.collector.dispatch.timer.Timeout; import com.usthe.collector.dispatch.timer.Timeout;
import com.usthe.collector.dispatch.timer.WheelTimerTask; import com.usthe.collector.dispatch.timer.WheelTimerTask;
@@ -111,6 +112,9 @@ public class MetricsCollect implements Runnable, Comparable<MetricsCollect> {
case DispatchConstants.PROTOCOL_JDBC: case DispatchConstants.PROTOCOL_JDBC:
abstractCollect = JdbcCommonCollect.getInstance(); abstractCollect = JdbcCommonCollect.getInstance();
break; break;
case DispatchConstants.PROTOCOL_SSH:
abstractCollect = SshCollectImpl.getInstance();
break;
// todo // todo
default: break; default: break;
} }

View File

@@ -3,6 +3,7 @@ package com.usthe.common.entity.job;
import com.usthe.common.entity.job.protocol.HttpProtocol; import com.usthe.common.entity.job.protocol.HttpProtocol;
import com.usthe.common.entity.job.protocol.IcmpProtocol; import com.usthe.common.entity.job.protocol.IcmpProtocol;
import com.usthe.common.entity.job.protocol.JdbcProtocol; import com.usthe.common.entity.job.protocol.JdbcProtocol;
import com.usthe.common.entity.job.protocol.SshProtocol;
import com.usthe.common.entity.job.protocol.TcpUdpProtocol; import com.usthe.common.entity.job.protocol.TcpUdpProtocol;
import com.usthe.common.entity.job.protocol.TelnetProtocol; import com.usthe.common.entity.job.protocol.TelnetProtocol;
import lombok.AllArgsConstructor; import lombok.AllArgsConstructor;
@@ -73,6 +74,10 @@ public class Metrics {
* 使用公共的jdbc规范实现的数据库配置信息 * 使用公共的jdbc规范实现的数据库配置信息
*/ */
private JdbcProtocol jdbc; private JdbcProtocol jdbc;
/**
* 使用公共的ssh协议的监控配置信息
*/
private SshProtocol ssh;
@Override @Override
public boolean equals(Object o) { public boolean equals(Object o) {

View File

@@ -0,0 +1,58 @@
package com.usthe.common.entity.job.protocol;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
* ssh 协议参数配置
* @author tom
* @date 2022/3/11 15:20
*/
@Data
@Builder
@AllArgsConstructor
@NoArgsConstructor
public class SshProtocol {
/**
* 对端主机ip或域名
*/
private String host;
/**
* 对端主机端口
*/
private String port;
/**
* 超时时间
*/
private String timeout = "3000";
/**
* 用户名
*/
private String username;
/**
* 密码(可选)
*/
private String password;
/**
* 公钥(可选)
*/
private String publicKey;
/**
* SSH执行脚本
*/
private String script;
/**
* 响应数据解析方式oneRow, multiRow
*/
private String parseType;
}

View File

@@ -20,6 +20,10 @@ sidebar_label: 帮助入门
[MYSQL数据库监控](mysql) &emsp;&emsp;&emsp;&emsp; [MariaDB数据库监控](mariadb) &emsp;&emsp;&emsp;&emsp; [PostgreSQL数据库监控](postgresql) [MYSQL数据库监控](mysql) &emsp;&emsp;&emsp;&emsp; [MariaDB数据库监控](mariadb) &emsp;&emsp;&emsp;&emsp; [PostgreSQL数据库监控](postgresql)
### 操作系统监控
[Linux操作系统监控](linux) &emsp;&emsp;&emsp;&emsp;
## 💡 告警服务 ## 💡 告警服务
> 更自由化的阈值告警配置支持邮箱短信webhook钉钉企业微信飞书机器人等告警通知。 > 更自由化的阈值告警配置支持邮箱短信webhook钉钉企业微信飞书机器人等告警通知。

View File

@@ -10,7 +10,7 @@ sidebar_label: 常见问题
> 如信息所示输入的监控Host须是ipv4,ipv6或域名不能携带协议头例如协议头http > 如信息所示输入的监控Host须是ipv4,ipv6或域名不能携带协议头例如协议头http
2. ** 网站API等监控反馈statusCode:403或401但对端服务本身无需认证浏览器直接访问是OK ** 2. ** 网站API等监控反馈statusCode:403或401但对端服务本身无需认证浏览器直接访问是OK **
> 请排查是否是被防火墙拦截如宝塔等默认设置了对请求header中`User-Agent=Apache-HttpClient`的拦截,若被拦截请删除此拦截规则。 > 请排查是否是被防火墙拦截如宝塔等默认设置了对请求header中`User-Agent=Apache-HttpClient`的拦截,若被拦截请删除此拦截规则。(v1.0.beat5版本已将user-agent模拟成浏览器此问题不存在)
### Docker部署常见问题 ### Docker部署常见问题

70
home/docs/help/linux.md Normal file
View File

@@ -0,0 +1,70 @@
---
id: linux
title: Linux操作系统监控
sidebar_label: Linux操作系统
---
> 对Linux操作系统的通用性能指标进行采集监控。
### 配置参数
| 参数名称 | 参数帮助描述 |
| ----------- | ----------- |
| 监控Host | 被监控的对端IPV4IPV6或域名。注意⚠不带协议头(eg: https://, http://)。 |
| 监控名称 | 标识此监控的名称,名称需要保证唯一性。 |
| 端口 | Linux SSH对外提供的端口默认为22。 |
| 用户名 | SSH连接用户名必填 |
| 密码 | SSH连接密码必填 |
| 采集间隔 | 监控周期性采集数据间隔时间单位秒可设置的最小间隔为10秒 |
| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 |
| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 |
### 采集指标
#### 指标集合basic
| 指标名称 | 指标单位 | 指标帮助描述 |
| ----------- | ----------- | ----------- |
| hostname | 无 | 主机名称 |
| version | 无 | 操作系统版本 |
| uptime | 无 | 系统运行时间 |
#### 指标集合cpu
| 指标名称 | 指标单位 | 指标帮助描述 |
| ----------- | ----------- | ----------- |
| info | 无 | CPU型号 |
| cores | 核数 | CPU内核数量 |
| interrupt | 个数 | CPU中断数量 |
| load | 无 | CPU最近1/5/15分钟的平均负载 |
| context_switch | 个数 | 当前上下文切换数量 |
#### 指标集合memory
| 指标名称 | 指标单位 | 指标帮助描述 |
| ----------- | ----------- | ----------- |
| total | Mb | 总内存容量 |
| used | Mb | 用户程序内存量 |
| free | Mb | 空闲内存容量 |
| buff_cache | Mb | 缓存占用内存 |
| available | Mb | 剩余可用内存容量 |
#### 指标集合disk
| 指标名称 | 指标单位 | 指标帮助描述 |
| ----------- | ----------- | ----------- |
| disk_num | 块数 | 磁盘总数 |
| partition_num | 分区数 | 分区总数 |
| block_write | 块数 | 写入磁盘的总块数 |
| block_read | 块数 | 从磁盘读出的块数 |
| write_rate | iops | 每秒写磁盘块的速率 |
#### 指标集合interface
| 指标名称 | 指标单位 | 指标帮助描述 |
| ----------- | ----------- | ----------- |
| interface_name | 无 | 网卡名称 |
| receive_bytes | byte | 入站数据流量(bytes) |
| transmit_bytes | byte | 出站数据流量(bytes) |

View File

@@ -60,6 +60,13 @@
"help/postgresql" "help/postgresql"
] ]
}, },
{
"type": "category",
"label": "操作系统",
"items": [
"help/linux"
]
},
{ {
"type": "category", "type": "category",
"label": "阈值告警配置", "label": "阈值告警配置",

View File

@@ -0,0 +1,168 @@
# 此监控类型所属类别service-应用服务监控 db-数据库监控 custom-自定义监控 os-操作系统监控
category: os
# 监控应用类型(与文件名保持一致) eg: linux windows tomcat mysql aws...
app: linux
name:
zh-CN: Linux操作系统
en-US: OS Linux
# 参数映射map. type是参数类型: 0-number数字, 1-string明文字符串, 2-secret加密字符串
# 强制固定必须参数 - host
configmap:
- key: host
type: 1
- key: port
type: 0
- key: username
type: 1
- key: password
type: 2
# 指标组列表
metrics:
# 第一个监控指标组 basic
# 注意:内置监控指标有 (responseTime - 响应时间)
- name: basic
# 指标组调度优先级(0-127)越小优先级越高,优先级低的指标组会等优先级高的指标组采集完成后才会被调度,相同优先级的指标组会并行调度采集
# 优先级为0的指标组为可用性指标组,即它会被首先调度,采集成功才会继续调度其它指标组,采集失败则中断调度
priority: 0
# 指标组中的具体监控指标
fields:
# 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 instance是否为实例主键 unit:指标单位
- field: hostname
type: 1
instance: true
- field: version
type: 1
- field: uptime
type: 1
# 监控采集使用协议 eg: sql, ssh, http, telnet, wmi, snmp, sdk
protocol: ssh
# 当protocol为ssh协议时具体的采集配置
ssh:
# 主机host: ipv4 ipv6 域名
host: ^_^host^_^
# 端口
port: ^_^port^_^
username: ^_^username^_^
password: ^_^password^_^
script: (uname -r ; hostname ; uptime | awk -F "," '{print $1}' | sed "s/ //g") | sed ":a;N;s/\n/^/g;ta" | awk -F '^' 'BEGIN{print "version hostname uptime"} {print $1, $2, $3}'
# 响应数据解析方式oneRow, multiRow
parseType: multiRow
- name: cpu
priority: 1
fields:
# 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 instance是否为实例主键 unit:指标单位
- field: info
type: 1
- field: cores
type: 0
unit: 核数
- field: interrupt
type: 0
unit: 个数
- field: load
type: 1
- field: context_switch
type: 0
unit: 个数
# 监控采集使用协议 eg: sql, ssh, http, telnet, wmi, snmp, sdk
protocol: ssh
# 当protocol为ssh协议时具体的采集配置
ssh:
# 主机host: ipv4 ipv6 域名
host: ^_^host^_^
# 端口
port: ^_^port^_^
username: ^_^username^_^
password: ^_^password^_^
script: "LANG=C lscpu | awk -F: '/Model name/ {print $2}';awk '/processor/{core++} END{print core}' /proc/cpuinfo;uptime | sed 's/,/ /g' | awk '{for(i=NF-2;i<=NF;i++)print $i }' | xargs;vmstat 1 1 | awk 'NR==3{print $11}';vmstat 1 1 | awk 'NR==3{print $12}'"
parseType: oneRow
- name: memory
priority: 2
fields:
# 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 instance是否为实例主键 unit:指标单位
- field: total
type: 0
unit: Mb
- field: used
type: 0
unit: Mb
- field: free
type: 0
unit: Mb
- field: buff_cache
type: 0
unit: Mb
- field: available
type: 0
unit: Mb
# 监控采集使用协议 eg: sql, ssh, http, telnet, wmi, snmp, sdk
protocol: ssh
# 当protocol为ssh协议时具体的采集配置
ssh:
# 主机host: ipv4 ipv6 域名
host: ^_^host^_^
# 端口
port: ^_^port^_^
username: ^_^username^_^
password: ^_^password^_^
script: free -m | grep Mem | awk 'BEGIN{print "total used free buff_cache available"} {print $2,$3,$4,$6,$7}'
parseType: multiRow
- name: disk
priority: 3
fields:
# 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 instance是否为实例主键 unit:指标单位
- field: disk_num
type: 0
unit: 块数
- field: partition_num
type: 0
unit: 分区数
- field: block_write
type: 0
unit: 块数
- field: block_read
type: 0
unit: 块数
- field: write_rate
type: 0
unit: iops
# 监控采集使用协议 eg: sql, ssh, http, telnet, wmi, snmp, sdk
protocol: ssh
# 当protocol为ssh协议时具体的采集配置
ssh:
# 主机host: ipv4 ipv6 域名
host: ^_^host^_^
# 端口
port: ^_^port^_^
username: ^_^username^_^
password: ^_^password^_^
script: vmstat -D | awk 'NR==1{print $1}';vmstat -D | awk 'NR==2{print $1}';vmstat 1 1 | awk 'NR==3{print $10}';vmstat 1 1 | awk 'NR==3{print $9}';vmstat 1 1 | awk 'NR==3{print $16}'
parseType: oneRow
- name: interface
priority: 4
fields:
# 指标信息 包括 field名称 type字段类型:0-number数字,1-string字符串 instance是否为实例主键 unit:指标单位
- field: interface_name
type: 1
- field: receive_bytes
type: 0
unit: byte
- field: transmit_bytes
type: 0
unit: byte
# 监控采集使用协议 eg: sql, ssh, http, telnet, wmi, snmp, sdk
protocol: ssh
# 当protocol为ssh协议时具体的采集配置
ssh:
# 主机host: ipv4 ipv6 域名
host: ^_^host^_^
# 端口
port: ^_^port^_^
username: ^_^username^_^
password: ^_^password^_^
script: cat /proc/net/dev | tail -n +3 | awk 'BEGIN{ print "interface_name receive_bytes transmit_bytes"} {print $1,$2,$10}'
parseType: multiRow

View File

@@ -0,0 +1,22 @@
# UI parameter definitions for the "linux" monitoring type (ssh based).
# field - parameter key consumed by the collect job configmap
# name  - label shown in the web ui
# type  - input widget type (host / number / text / password)
app: linux
param:
  # target host: ipv4, ipv6 or domain name, without protocol prefix
  - field: host
    name: 主机Host
    type: host
    required: true
  # ssh port exposed by the target, defaults to 22
  - field: port
    name: 端口
    type: number
    range: '[0,65535]'
    required: true
    defaultValue: 22
    placeholder: '请输入端口'
  # ssh login username
  - field: username
    name: 用户名
    type: text
    limit: 20
    required: true
  # ssh login password
  - field: password
    name: 密码
    type: password
    required: true

View File

@@ -44,7 +44,7 @@
{ {
"key": "os", "key": "os",
"text": "操作系统", "text": "操作系统",
"hide": true, "hide": false,
"i18n": "menu.monitor.os", "i18n": "menu.monitor.os",
"icon": "anticon-windows" "icon": "anticon-windows"
}, },