Skip to content

Commit 4b4f04b

Browse files
Merge branch 'master' into feature/component_templates
2 parents 981a1ca + 77ba703 commit 4b4f04b

203 files changed

Lines changed: 6124 additions & 2793 deletions

File tree

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

.ci/end2end.groovy

Lines changed: 13 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,7 @@ pipeline {
1212
environment {
1313
BASE_DIR = 'src/github.com/elastic/kibana'
1414
HOME = "${env.WORKSPACE}"
15-
APM_ITS = 'apm-integration-testing'
16-
CYPRESS_DIR = 'x-pack/plugins/apm/e2e'
15+
E2E_DIR = 'x-pack/plugins/apm/e2e'
1716
PIPELINE_LOG_LEVEL = 'DEBUG'
1817
}
1918
options {
@@ -43,32 +42,6 @@ pipeline {
4342
env.APM_UPDATED = isGitRegionMatch(patterns: regexps)
4443
}
4544
}
46-
dir("${APM_ITS}"){
47-
git changelog: false,
48-
credentialsId: 'f6c7695a-671e-4f4f-a331-acdce44ff9ba',
49-
poll: false,
50-
url: "git@github.com:elastic/${APM_ITS}.git"
51-
}
52-
}
53-
}
54-
stage('Start services') {
55-
options { skipDefaultCheckout() }
56-
when {
57-
anyOf {
58-
expression { return params.FORCE }
59-
expression { return env.APM_UPDATED != "false" }
60-
}
61-
}
62-
steps {
63-
notifyStatus('Starting services', 'PENDING')
64-
dir("${APM_ITS}"){
65-
sh './scripts/compose.py start master --no-kibana'
66-
}
67-
}
68-
post {
69-
unsuccessful {
70-
notifyStatus('Environmental issue', 'FAILURE')
71-
}
7245
}
7346
}
7447
stage('Prepare Kibana') {
@@ -85,7 +58,7 @@ pipeline {
8558
steps {
8659
notifyStatus('Preparing kibana', 'PENDING')
8760
dir("${BASE_DIR}"){
88-
sh script: "${CYPRESS_DIR}/ci/prepare-kibana.sh"
61+
sh "${E2E_DIR}/ci/prepare-kibana.sh"
8962
}
9063
}
9164
post {
@@ -105,24 +78,20 @@ pipeline {
10578
steps{
10679
notifyStatus('Running smoke tests', 'PENDING')
10780
dir("${BASE_DIR}"){
108-
sh '''
109-
jobs -l
110-
docker build --tag cypress --build-arg NODE_VERSION=$(cat .node-version) ${CYPRESS_DIR}/ci
111-
docker run --rm -t --user "$(id -u):$(id -g)" \
112-
-v `pwd`:/app --network="host" \
113-
--name cypress cypress'''
81+
sh "${E2E_DIR}/ci/run-e2e.sh"
11482
}
11583
}
11684
post {
11785
always {
118-
dir("${BASE_DIR}"){
119-
archiveArtifacts(allowEmptyArchive: false, artifacts: "${CYPRESS_DIR}/**/screenshots/**,${CYPRESS_DIR}/**/videos/**,${CYPRESS_DIR}/**/test-results/*e2e-tests.xml")
120-
junit(allowEmptyResults: true, testResults: "${CYPRESS_DIR}/**/test-results/*e2e-tests.xml")
121-
}
122-
dir("${APM_ITS}"){
123-
sh 'docker-compose logs > apm-its.log || true'
124-
sh 'docker-compose down -v || true'
125-
archiveArtifacts(allowEmptyArchive: false, artifacts: 'apm-its.log')
86+
dir("${BASE_DIR}/${E2E_DIR}"){
87+
archiveArtifacts(allowEmptyArchive: false, artifacts: 'cypress/screenshots/**,cypress/videos/**,cypress/test-results/*e2e-tests.xml')
88+
junit(allowEmptyResults: true, testResults: 'cypress/test-results/*e2e-tests.xml')
89+
dir('tmp/apm-integration-testing'){
90+
sh 'docker-compose logs > apm-its-docker.log || true'
91+
sh 'docker-compose down -v || true'
92+
archiveArtifacts(allowEmptyArchive: true, artifacts: 'apm-its-docker.log')
93+
}
94+
archiveArtifacts(allowEmptyArchive: true, artifacts: 'tmp/*.log')
12695
}
12796
}
12897
unsuccessful {
@@ -137,7 +106,7 @@ pipeline {
137106
post {
138107
always {
139108
dir("${BASE_DIR}"){
140-
archiveArtifacts(allowEmptyArchive: true, artifacts: "${CYPRESS_DIR}/ingest-data.log,kibana.log")
109+
archiveArtifacts(allowEmptyArchive: true, artifacts: "${E2E_DIR}/kibana.log")
141110
}
142111
}
143112
}

.github/CODEOWNERS

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -142,6 +142,7 @@
142142
/config/kibana.yml @elastic/kibana-platform
143143
/x-pack/plugins/features/ @elastic/kibana-platform
144144
/x-pack/plugins/licensing/ @elastic/kibana-platform
145+
/x-pack/plugins/cloud/ @elastic/kibana-platform
145146
/packages/kbn-config-schema/ @elastic/kibana-platform
146147
/src/legacy/server/config/ @elastic/kibana-platform
147148
/src/legacy/server/http/ @elastic/kibana-platform
84.7 KB
Loading
55.2 KB
Loading
192 KB
Loading
Lines changed: 144 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,144 @@
1+
[role="xpack"]
2+
[[ingest-node-pipelines]]
3+
== Ingest Node Pipelines
4+
5+
*Ingest Node Pipelines* enables you to create and manage {es}
6+
pipelines that perform common transformations and
7+
enrichments on your data. For example, you might remove a field,
8+
rename an existing field, or set a new field.
9+
10+
You’ll find *Ingest Node Pipelines* in *Management > Elasticsearch*. With this feature, you can:
11+
12+
* View a list of your pipelines and drill down into details.
13+
* Create a pipeline that defines a series of tasks, known as processors.
14+
* Test a pipeline before feeding it with real data to ensure the pipeline works as expected.
15+
* Delete a pipeline that is no longer needed.
16+
17+
[role="screenshot"]
18+
image:management/ingest-pipelines/images/ingest-pipeline-list.png["Ingest node pipeline list"]
19+
20+
[float]
21+
=== Required permissions
22+
23+
The minimum required permissions to access *Ingest Node Pipelines* are
24+
the `manage_pipeline` and `cluster:monitor/nodes/info` cluster privileges.
25+
26+
You can add these privileges in *Management > Security > Roles*.
27+
28+
[role="screenshot"]
29+
image:management/ingest-pipelines/images/ingest-pipeline-privileges.png["Privileges required for Ingest Node Pipelines"]
30+
31+
[float]
32+
[[ingest-node-pipelines-manage]]
33+
=== Manage pipelines
34+
35+
From the list view, you can drill down into the details of a pipeline.
36+
To
37+
edit, clone, or delete a pipeline, use the *Actions* menu.
38+
39+
If you don’t have any pipelines, you can create one using the
40+
*Create pipeline* form. You’ll define processors to transform documents
41+
in a specific way. To handle exceptions, you can optionally define
42+
failure processors to execute immediately after a failed processor.
43+
Before creating the pipeline, you can verify it provides the expected output.
44+
45+
[float]
46+
[[ingest-node-pipelines-example]]
47+
==== Example: Create a pipeline
48+
49+
In this example, you’ll create a pipeline to handle server logs in the
50+
Common Log Format. The log looks similar to this:
51+
52+
[source,js]
53+
----------------------------------
54+
212.87.37.154 - - [05/May/2020:16:21:15 +0000] \"GET /favicon.ico HTTP/1.1\"
55+
200 3638 \"-\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6)
56+
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36\"
57+
----------------------------------
58+
59+
The log contains an IP address, timestamp, and user agent. You want to give
60+
these three items their own field in {es} for fast search and visualization.
61+
You also want to know where the request is coming from.
62+
63+
. In *Ingest Node Pipelines*, click *Create a pipeline*.
64+
. Provide a name and description for the pipeline.
65+
. Define the processors:
66+
+
67+
[source,js]
68+
----------------------------------
69+
[
70+
{
71+
"grok": {
72+
"field": "message",
73+
"patterns": ["%{IPORHOST:clientip} %{USER:ident} %{USER:auth} \\[%{HTTPDATE:timestamp}\\] \"%{WORD:verb} %{DATA:request} HTTP/%{NUMBER:httpversion}\" %{NUMBER:response:int} (?:-|%{NUMBER:bytes:int}) %{QS:referrer} %{QS:agent}"]
74+
}
75+
},
76+
{
77+
"date": {
78+
"field": "timestamp",
79+
"formats": [ "dd/MMM/YYYY:HH:mm:ss Z" ]
80+
}
81+
},
82+
{
83+
"geoip": {
84+
"field": "clientip"
85+
}
86+
},
87+
{
88+
"user_agent": {
89+
"field": "agent"
90+
}
91+
}
92+
]
93+
----------------------------------
94+
+
95+
This code defines four {ref}/ingest-processors.html[processors] that run sequentially:
96+
{ref}/grok-processor.html[grok], {ref}/date-processor.html[date],
97+
{ref}/geoip-processor.html[geoip], and {ref}/user-agent-processor.html[user_agent].
98+
Your form should look similar to this:
99+
+
100+
[role="screenshot"]
101+
image:management/ingest-pipelines/images/ingest-pipeline-processor.png["Processors for Ingest Node Pipelines"]
102+
103+
. To verify that the pipeline gives the expected outcome, click *Test pipeline*.
104+
105+
. In the *Document* tab, provide the following sample document for testing:
106+
+
107+
[source,js]
108+
----------------------------------
109+
[
110+
{
111+
"_source": {
112+
"message": "212.87.37.154 - - [05/May/2020:16:21:15 +0000] \"GET /favicon.ico HTTP/1.1\" 200 3638 \"-\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36\""
113+
}
114+
}
115+
]
116+
----------------------------------
117+
118+
. Click *Run the pipeline* and check if the pipeline worked as expected.
119+
+
120+
You can also
121+
view the verbose output and refresh the output from this view.
122+
123+
. If everything looks correct, close the panel, and then click *Create pipeline*.
124+
+
125+
At this point, you’re ready to use the Elasticsearch index API to load
126+
the logs data.
127+
128+
. In the Kibana Console, index a document with the pipeline
129+
you created.
130+
+
131+
[source,js]
132+
----------------------------------
133+
PUT my-index/_doc/1?pipeline=access_logs
134+
{
135+
"message": "212.87.37.154 - - [05/May/2020:16:21:15 +0000] \"GET /favicon.ico HTTP/1.1\" 200 3638 \"-\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36\""
136+
}
137+
----------------------------------
138+
139+
. To verify, run:
140+
+
141+
[source,js]
142+
----------------------------------
143+
GET my-index/_doc/1
144+
----------------------------------

docs/uptime/alerting.asciidoc

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
[role="xpack"]
2+
[[uptime-alerting]]
3+
4+
== Uptime alerting
5+
6+
The Uptime app integrates with Kibana's {kibana-ref}/alerting-getting-started.html[alerting and actions]
7+
feature. It provides a set of built-in actions and Uptime specific threshold alerts for you to use
8+
and enables central management of all alerts from <<management, Kibana Management>>.
9+
10+
[float]
11+
=== Monitor status alerts
12+
13+
To receive alerts when a monitor goes down, use the alerting menu at the top of the
14+
overview page. Use a query in the alert flyout to determine which monitors to check
15+
with your alert. If you already have a query in the overview page search bar, it will
16+
be carried over into this box.
17+
18+
[role="screenshot"]
19+
image::uptime/images/monitor-status-alert-flyout.png[Create monitor status alert flyout]
20+
21+
[float]
22+
=== TLS alerts
23+
24+
Uptime also provides the ability to create an alert that will notify you when one or
25+
more of your monitors have a TLS certificate that will expire within some threshold,
26+
or when its age exceeds a limit. The values for these thresholds are configurable on
27+
the <<uptime-settings, Settings page>>.
28+
29+
[role="screenshot"]
30+
image::uptime/images/tls-alert-flyout.png[Create TLS alert flyout]

docs/uptime/certificates.asciidoc

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
[role="xpack"]
2+
[[uptime-certificates]]
3+
4+
== Certificates
5+
6+
[role="screenshot"]
7+
image::uptime/images/certificates-page.png[Certificates]
8+
9+
The certificates page allows you to visualize TLS certificate data in your indices. In addition to the
10+
common name, associated monitors, issuer information, and SHA fingerprints, Uptime also assigns a status
11+
derived from the threshold values in the <<uptime-settings, Settings page>>.
12+
13+
Several of the columns on this page are sortable. You can use the search bar at the top of the view
14+
to find values in most of the TLS-related fields in your Uptime indices. Additionally, you can
15+
create a TLS alert using the `Alerts` dropdown at the top of the page.
-181 KB
Binary file not shown.
559 KB
Loading

0 commit comments

Comments
 (0)