sudo bin/elasticsearch-plugin install repository-azure
or, depending on where Elasticsearch is installed:
sudo /usr/share/elasticsearch/bin/elasticsearch-plugin install repository-azure
sudo systemctl restart elasticsearch
bin/elasticsearch-keystore add azure.client.default.account
bin/elasticsearch-keystore add azure.client.default.key
Note that the keystore binary may instead be at /usr/share/elasticsearch/bin/elasticsearch-keystore; it depends on your installation configuration. After setting the values in the keystore, restart your Elasticsearch cluster:
sudo systemctl restart elasticsearch
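If you want to confirm the two settings were stored, the keystore can list the key names it holds (names only, never the secret values); a quick check, using the same path as above:
sudo /usr/share/elasticsearch/bin/elasticsearch-keystore list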
Next, register the snapshot repository by sending a PUT request to
http://eshost:port/_snapshot/name-of-your-repo
and pass this JSON payload:
{
  "type": "azure",
  "settings": {
    "container": "backup-container",
    "base_path": "backups",
    "chunk_size": "32MB",
    "compress": true
  }
}
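If you are working from a terminal rather than Kibana or Postman, the same request can be sketched with curl (assuming the eshost:port placeholder and the name-of-your-repo repository name used above):
curl -X PUT "http://eshost:port/_snapshot/name-of-your-repo" \
  -H "Content-Type: application/json" \
  -d '{ "type": "azure", "settings": { "container": "backup-container", "base_path": "backups", "chunk_size": "32MB", "compress": true } }'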
base_path defines a folder where the snapshot data should be stored. This is useful if you are taking snapshots of different indices, or even data from different clusters, and storing them all in one container.
chunk_size defines the maximum size that big files are broken down into before being transferred. You can get more details about the settings by following this link.
[Screenshot of my repository settings]
To take a snapshot, send a PUT request to
http://eshost:port/_snapshot/azureblob_backup/%3Csnapshot-%7Bnow%2Fd%7D%3E
with this payload:
{
  "indices": "index_1,index_2",
  "ignore_unavailable": true,
  "include_global_state": true,
  "partial": true
}
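The same call as a curl sketch (again with the eshost:port placeholder; note that the date-math snapshot name has to stay URL-encoded on the command line):
curl -X PUT "http://eshost:port/_snapshot/azureblob_backup/%3Csnapshot-%7Bnow%2Fd%7D%3E" \
  -H "Content-Type: application/json" \
  -d '{ "indices": "index_1,index_2", "ignore_unavailable": true, "include_global_state": true, "partial": true }'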
The %3Csnapshot-%7Bnow%2Fd%7D%3E part of the URL is the URL-encoded version of <snapshot-{now/d}>, which is translated to the date the snapshot was taken, e.g. snapshot-2020.04.09. Also note that it isn't a prerequisite to name your backups this way; you can give them any name you want. This naming just makes sense if you are doing daily or weekly backups via cron, for example, so that snapshots are easy to reference.
To check on your snapshot, send a GET request to
http://eshost:port/_snapshot/azureblob_backup/<snapshot-name>
for example
http://eshost:port/_snapshot/azureblob_backup/snapshot-2020.04.09
and you will get a response like this:
{
  "snapshots": [
    {
      "snapshot": "snapshot-2020.04.09",
      "uuid": "OjLZEfXDS-mKVqsSi7VteQ",
      "version_id": 6080399,
      "version": "6.8.3",
      "indices": [
        "sample_records"
      ],
      "include_global_state": true,
      "state": "SUCCESS",
      "start_time": "2020-04-08T09:53:47.926Z",
      "start_time_in_millis": 1586339627926,
      "end_time": "2020-04-08T10:03:15.361Z",
      "end_time_in_millis": 1586340195361,
      "duration_in_millis": 567435,
      "failures": [],
      "shards": {
        "total": 15,
        "failed": 0,
        "successful": 15
      }
    }
  ]
}
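For reference, the same status check from a terminal is just a GET (a sketch with the same placeholders; _all returns every snapshot in the repository instead of a single one):
curl "http://eshost:port/_snapshot/azureblob_backup/snapshot-2020.04.09"
curl "http://eshost:port/_snapshot/azureblob_backup/_all"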
Next, send a POST request to the following endpoint to restore your snapshot to the new cluster:
http://eshost:port/_snapshot/<repo-name>/<snapshot-name>/_restore
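A curl sketch of the restore call, assuming the azureblob_backup repository and the snapshot-2020.04.09 snapshot from above (the body is optional; leaving it out restores every index in the snapshot):
curl -X POST "http://eshost:port/_snapshot/azureblob_backup/snapshot-2020.04.09/_restore" \
  -H "Content-Type: application/json" \
  -d '{ "indices": "sample_records", "include_global_state": true }'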
A snapshot of an index created in 6.x can be restored to 7.x.
A snapshot of an index created in 5.x can be restored to 6.x.
A snapshot of an index created in 2.x can be restored to 5.x.
A snapshot of an index created in 1.x can be restored to 2.x.
This is from the official Elasticsearch docs.
Bonus! Automating Things
const axios = require("axios")
const nodemailer = require('nodemailer');
/**
* Configure email
*/
let transporter = nodemailer.createTransport({
  service: 'emailservice',
  auth: {
    user: '[email protected]',
    pass: '*****************'
  }
});
// Snapshot endpoint for the azureblob_backup repository and a label used in the notification emails
const SNAPSHOT_URL = 'http://localhost:9200/_snapshot/azureblob_backup/';
const CLUSTER_NAME = 'tutorial_cluster';
// Build a UTC timestamp used to name the snapshot, e.g. snapshot-2020-4-9-9-53-47
let dateObj = new Date();
let month = dateObj.getUTCMonth() + 1; //months from 1-12
let day = dateObj.getUTCDate();
let year = dateObj.getUTCFullYear();
let hour = dateObj.getUTCHours();
let minute = dateObj.getUTCMinutes();
let seconds = dateObj.getUTCSeconds();
let backuptime = `${year}-${month}-${day}-${hour}-${minute}-${seconds}`;
// Start the snapshot; Elasticsearch acknowledges with { accepted: true } and runs it in the background
axios.post(`${SNAPSHOT_URL}snapshot-${backuptime}`, {
  "ignore_unavailable": true,
  "include_global_state": true
}).then((response) => {
  console.log(response.data.accepted)
  if (response.data.accepted === true) {
    console.log("start checking for status")
    checker();
  } else {
    console.log("send failure notification")
    notify(`Could not start backup for ${CLUSTER_NAME}`)
  }
}, (error) => {
  console.log("Backup Not Started Error ===>", error)
  notify(`Could not start backup for ${CLUSTER_NAME}`)
});
// Poll the snapshot status every 5 seconds until it reaches a terminal state
let checker = function () {
  let intervalId = setInterval(() => {
    console.log("checking.....")
    axios.get(`${SNAPSHOT_URL}snapshot-${backuptime}`)
      .then(function (response) {
        // handle success
        let status = response.data.snapshots[0].state;
        console.log(status);
        if (status === 'SUCCESS') {
          // send a success mail & clear interval
          clearInterval(intervalId);
          notify(`${CLUSTER_NAME} Has Been Backed Up Successfully \n completed in ${milisecConvert(response.data.snapshots[0].duration_in_millis)} minute(s) \n please check ${SNAPSHOT_URL}snapshot-${backuptime} for details`)
        } else if (status === 'ABORTED' || status === 'FAILED') {
          // send failure message & clear interval
          clearInterval(intervalId);
          notify(`${CLUSTER_NAME} Backup Failed, please check ${SNAPSHOT_URL}snapshot-${backuptime} for details`)
        } else if (status === 'PARTIAL') {
          // send partial-success message & clear interval
          clearInterval(intervalId);
          notify(`${CLUSTER_NAME} Backed up with a few issues, please check ${SNAPSHOT_URL}snapshot-${backuptime} for details`)
        } else {
          // still IN_PROGRESS, keep polling
        }
      })
      .catch(function (error) {
        console.log("request status error >>>>>", error)
        clearInterval(intervalId);
      })
  }, 5000);
}
// Send a notification email with the given message
let notify = (message) => {
  // set mail options
  let mailOptions = {
    from: '[email protected]',
    to: '[email protected]',
    subject: `${CLUSTER_NAME} Elasticsearch Backup Notification`,
    text: message
  };
  transporter.sendMail(mailOptions, (error, info) => {
    if (error) {
      console.log(error);
    } else {
      console.log('Email sent: ' + info.response);
    }
  });
}
// Convert a duration in milliseconds to whole minutes for the email body
let milisecConvert = (milisec) => {
  let hours, minutes;
  hours = Math.floor(milisec / 1000 / 60 / 60);
  minutes = Math.floor((milisec / 1000 / 60 / 60 - hours) * 60);
  return minutes > 1 ? minutes : 'less than 1';
}
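To run this on a schedule, the script can be wired into cron; a sketch, assuming a hypothetical file name es-backup.js and that node lives at /usr/bin/node, running the backup every day at 1 AM and appending output to a log file:
0 1 * * * /usr/bin/node /path/to/es-backup.js >> /var/log/es-backup.log 2>&1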
NOTES
O dabọ (Goodbye) ✌