Thursday, July 21, 2016

Adding SWAP to AWS EC2 Instance

[root@ip-172-31-8-223 /]# more /etc/fstab

#
# /etc/fstab
# Created by anaconda on Mon Nov  9 20:20:10 2015
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
UUID=379de64d-ea11-4f5b-ae6a-0aa50ff7b24d /                       xfs     defaults        0 0
/dev/xvdf                                 /opt/IBMDocker                  btrfs   defaults        0 0
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]# more /etc/mnttab
/etc/mnttab: No such file or directory
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]# lsblk
NAME                                                                                  MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
xvda                                                                                  202:0    0   10G  0 disk
├─xvda1                                                                               202:1    0    1M  0 part
└─xvda2                                                                               202:2    0   10G  0 part /
xvdf                                                                                  202:80   0  150G  0 disk /opt/IBMDocker
xvdg                                                                                  202:96   0   20G  0 disk
loop0                                                                                   7:0    0  100G  0 loop
└─docker-0:36-3241-pool                                                               253:0    0  100G  0 dm
  ├─docker-0:36-3241-d54f81e9015ddc70c47a99fa8f2b682cbddde4a2bf8544e346447eb013adbcdc 253:1    0   50G  0 dm
  ├─docker-0:36-3241-3c373a7b8bc9c4a2d22e4c6d82ff112d22679dc226834175f4b7a0fc71eec85b 253:2    0   50G  0 dm
  ├─docker-0:36-3241-5ac5006fbca47bcedcfe9c3140cfbfa96b739d1e1a943287607915cd20b8011c 253:3    0   50G  0 dm
  ├─docker-0:36-3241-eaa5b76f2e987165319fb006d1850f7283d9071f01175f17da3d05126fbb7423 253:4    0   50G  0 dm
  ├─docker-0:36-3241-6bf3bda9ecc67ad639fb127bcdd91610b2dfb3b7b8c8479494bad342fb34c801 253:5    0   50G  0 dm
  └─docker-0:36-3241-3149fd173c6a79bfcd1dfb0c94d07a77caf3e1bd605106b0f8ad9affcae29ef3 253:6    0   50G  0 dm
loop1                                                                                   7:1    0    2G  0 loop
└─docker-0:36-3241-pool                                                               253:0    0  100G  0 dm
  ├─docker-0:36-3241-d54f81e9015ddc70c47a99fa8f2b682cbddde4a2bf8544e346447eb013adbcdc 253:1    0   50G  0 dm
  ├─docker-0:36-3241-3c373a7b8bc9c4a2d22e4c6d82ff112d22679dc226834175f4b7a0fc71eec85b 253:2    0   50G  0 dm
  ├─docker-0:36-3241-5ac5006fbca47bcedcfe9c3140cfbfa96b739d1e1a943287607915cd20b8011c 253:3    0   50G  0 dm
  ├─docker-0:36-3241-eaa5b76f2e987165319fb006d1850f7283d9071f01175f17da3d05126fbb7423 253:4    0   50G  0 dm
  ├─docker-0:36-3241-6bf3bda9ecc67ad639fb127bcdd91610b2dfb3b7b8c8479494bad342fb34c801 253:5    0   50G  0 dm
  └─docker-0:36-3241-3149fd173c6a79bfcd1dfb0c94d07a77caf3e1bd605106b0f8ad9affcae29ef3 253:6    0   50G  0 dm
[root@ip-172-31-8-223 /]# mkswap -f /dev/xvdg
Setting up swapspace version 1, size = 20971516 KiB
no label, UUID=e322f4a9-cfdb-4906-9c45-74362ba4fac6
[root@ip-172-31-8-223 /]# swapon /dev/xvdg
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]# lsblk
NAME                                                                                  MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
xvda                                                                                  202:0    0   10G  0 disk
├─xvda1                                                                               202:1    0    1M  0 part
└─xvda2                                                                               202:2    0   10G  0 part /
xvdf                                                                                  202:80   0  150G  0 disk /opt/IBMDocker
xvdg                                                                                  202:96   0   20G  0 disk [SWAP]
loop0                                                                                   7:0    0  100G  0 loop
└─docker-0:36-3241-pool                                                               253:0    0  100G  0 dm
  ├─docker-0:36-3241-d54f81e9015ddc70c47a99fa8f2b682cbddde4a2bf8544e346447eb013adbcdc 253:1    0   50G  0 dm
  ├─docker-0:36-3241-3c373a7b8bc9c4a2d22e4c6d82ff112d22679dc226834175f4b7a0fc71eec85b 253:2    0   50G  0 dm
  ├─docker-0:36-3241-5ac5006fbca47bcedcfe9c3140cfbfa96b739d1e1a943287607915cd20b8011c 253:3    0   50G  0 dm
  ├─docker-0:36-3241-eaa5b76f2e987165319fb006d1850f7283d9071f01175f17da3d05126fbb7423 253:4    0   50G  0 dm
  ├─docker-0:36-3241-6bf3bda9ecc67ad639fb127bcdd91610b2dfb3b7b8c8479494bad342fb34c801 253:5    0   50G  0 dm
  └─docker-0:36-3241-3149fd173c6a79bfcd1dfb0c94d07a77caf3e1bd605106b0f8ad9affcae29ef3 253:6    0   50G  0 dm
loop1                                                                                   7:1    0    2G  0 loop
└─docker-0:36-3241-pool                                                               253:0    0  100G  0 dm
  ├─docker-0:36-3241-d54f81e9015ddc70c47a99fa8f2b682cbddde4a2bf8544e346447eb013adbcdc 253:1    0   50G  0 dm
  ├─docker-0:36-3241-3c373a7b8bc9c4a2d22e4c6d82ff112d22679dc226834175f4b7a0fc71eec85b 253:2    0   50G  0 dm
  ├─docker-0:36-3241-5ac5006fbca47bcedcfe9c3140cfbfa96b739d1e1a943287607915cd20b8011c 253:3    0   50G  0 dm
  ├─docker-0:36-3241-eaa5b76f2e987165319fb006d1850f7283d9071f01175f17da3d05126fbb7423 253:4    0   50G  0 dm
  ├─docker-0:36-3241-6bf3bda9ecc67ad639fb127bcdd91610b2dfb3b7b8c8479494bad342fb34c801 253:5    0   50G  0 dm
  └─docker-0:36-3241-3149fd173c6a79bfcd1dfb0c94d07a77caf3e1bd605106b0f8ad9affcae29ef3 253:6    0   50G  0 dm
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]# swapon -v
NAME      TYPE      SIZE USED PRIO
/dev/xvdg partition  20G   0B   -1
[root@ip-172-31-8-223 /]#


[root@ip-172-31-8-223 /]# cat /etc/fstab

#
# /etc/fstab
# Created by anaconda on Mon Nov  9 20:20:10 2015
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
UUID=379de64d-ea11-4f5b-ae6a-0aa50ff7b24d /                       xfs     defaults        0 0
/dev/xvdf                                 /opt/IBMDocker                  btrfs   defaults        0 0
/dev/xvdg       swap    swap defaults   0       0
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]# swapon -v
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]# cat /etc/fstab

#
# /etc/fstab
# Created by anaconda on Mon Nov  9 20:20:10 2015
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
UUID=379de64d-ea11-4f5b-ae6a-0aa50ff7b24d /                       xfs     defaults        0 0
/dev/xvdf                                 /opt/IBMDocker                  btrfs   defaults        0 0
/dev/xvdg       swap    swap defaults   0       0
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]# swapoff -a
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]# swapon -a
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]#
[root@ip-172-31-8-223 /]# swapon -v
NAME      TYPE      SIZE USED PRIO
/dev/xvdg partition  20G   0B   -1

Saturday, July 16, 2016

Displaying status and percentage in DASH widget

http://www.ibm.com/support/knowledgecenter/SSSHYH_7.1.0.4/com.ibm.netcoolimpact.doc/solution/uidataprovider_status_and_precentages_widget_t.html

Displaying status and percentage in a widget

You can show status and percentage in topology, tree, table, and list widgets by using policies or data types. To show status and percentages in a widget, you must create a script in JavaScript format in the data type if the policy uses the GetByFilter function.

About this task

For data types, SQL, SNMP, and internal data types are supported. For policies the GetByFilter, DirectSQL and Impact Object, and Array Of Impact Objects are supported.
  1. Create the data type.
  2. In the data type configuration window, add the script to the Define Custom Types and Values (JavaScript) area.
    Restriction: Not all functions that are provided by JavaScript are supported. If you get a syntax error for a known JavaScript function, check that the function is supported in an Impact policy.
  3. Click the Check Syntax and Preview Sample Result button to preview the results and to check the syntax of the script.
For DirectSQL and Impact Object, Array Of Impact Objects, the Status, and Percentage can be specified when you create the schema definition. For policies, you can use IPL or JavaScript for the DirectSQL or GetByFilter functions.
The script uses the following syntax for data types and for policies that use the GetByFilter function.
ImpactUICustomValues.put("FieldName,Type",VariableName);
Where Type is either Percentage or Status. VariableName, can be a variable or hardcoded value. Always cast the variable name to String to avoid any error even if the value is numeric. See the following examples:
ImpactUICustomValues.put("MyField,Percentage",""+VariableName);
ImpactUICustomValues.put("MyField,Percentage","120");
ImpactUICustomValues.put("FieldName,Percentage",""+(field1/40));
The status field expects the value to be similar to the Topology widget configuration:
Table 1. Status field values
Status Number
Critical 5
Major 4
Minor 3
Warning 2
Intermediate or Indeterminate 1
  • Either status is available when the connection to Netcool/Impact uses https.
  • If the connection is https, go to $IMPACT_HOME/etc/server.props and set the property
    • impact.uidataprovider.useiconfromprovider=true
  • For all examples, you can replace Intermediate with Indeterminate when needed.
There is no limit to how many fields you can put in the variable ImpactUICustomValues. The variable must be at the very end of the script. Anything before the variable must be in JavaScript and can be anything if the variable ImpactUICustomValues is populated correctly.
Example 1:
Assigns the field name from the table to be the status or the percentage and assigns the field value. This example assigns SHAREDOWN and PROFIT as the percentages, and STANDING as the status.
ImpactUICustomValues.put("SHAREUP,Percentage",SHAREUP);
ImpactUICustomValues.put("SHAREDOWN,Percentage",SHAREDOWN);
ImpactUICustomValues.put("PROFIT,Percentage",PROFIT);
ImpactUICustomValues.put("STANDING,Status",STANDING);
Example 2:
This example has an extra calculation to determine the value of percentage or status fields. The percentage assumes the maximum value to use is 100. Then, a factor is used to scale the values that are based on the maximum value that is expected by the user. The status and percentage is scaled based on a factor.
var status = "Normal";
var down = 0;
var up = 0;
var factor = ( TOTAL / 100);
down = (DOWN / factor);
up = (UP / factor);
var statusFactor = (DOWN / TOTAL) * 100;
if ( statusFactor >= 50) {
   status = "Critical";
} else if ( statusFactor >= 30 ) {
   status = "Major";
} else if (statusFactor >= 20) {
   status = "Minor";
} else if (statusFactor >= 10 ) {
  status = "Warning";
} else {
   status = "Normal";
}
ImpactUICustomValues.put("DownPercentage,Percentage",""+down);
ImpactUICustomValues.put("UpPercentage,Percentage",""+up);
ImpactUICustomValues.put("NetworkStatus,Status",""+status);
Example 3:
This example uses extra fields that do not exist in the table and used to be the Status and Percentage. The values are the exact values that come from fields that exist in the table. Calculation can be used to assign different values:
ImpactUICustomValues.put("CPUPercentUsage,Percentage",CPUUsage);
ImpactUICustomValues.put("RAMPercentUsage,Percentage",RAMUsage);
ImpactUICustomValues.put("DiskPercentUsage,Percentage",DiskUsage);
ImpactUICustomValues.put("NetworkAvailability,Status",NetworkStatus);
Tip: The Table or List widget shows duplicate entries or have missing data when you compare the data to the data type data items. Check the data source to ensure that all keys are unique.
Tip: In an instance where you use a policy function to create a dynamic filter, and you get a message in the policy log. The messages states that the filter variable is not defined in policy. No eventing occurs between widgets. Check that you are not using any special characters in the custom value in the data type for example, ImpactUICustomValues.put("CPU%,Percentage",""+value). The widgets do not support special characters in field names.
Tip: If a data type field type is incorrectly defined, for example the field is defined as an integer, but contains float values the widget fails to load. The widget shows a message similar to this example:
Failed to load
To resolve the issue, edit the data type field and select the correct data type float.

Wednesday, July 13, 2016

LA Insight pack for AWS Cloudwatch



To install the file:
copy it over to /opt/IBM/LogAnalysis/unity_content
cd to the directory above
../utilities/pkg_mgmt.sh -install AWS_v1.0.0.3.zip
This will install the insight pack with two new source types. You can see these in the admin settings under the "source types" tab:
aws_log and aws_metric
You can then create 2 new data sources in the Log Analysis admin screen for each file
In the admin settings tab, go to data sources and create 2 new data sources:  one for each log you want to load (metric and log)
First tab:  Local for a local file on the same computer as the la instance you are looking at.  Remote will ask you for SSH credentials for the remote server the log file resides on
second tab:  either use browse to select your file or manually input its location, use aws_metric or aws_log depending on the file you entered
third tab:  only mandatory field is the name.  Give it something meaningful.
>>>> you should see a message saying data was ingested.
Check this by going to search:

To see the LFA configs for these:
/opt/IBM/LogAnalysis/IBM-LFA-6.30/config/lo
there will be an fmt and conf pair

Grafana cloudwatch configuration




#cp -r ~/.aws /usr/share/grafana/
#chmod 777 /usr/share/grafana/.aws/credentials



Thursday, July 7, 2016

Installing APM on AWS EC2 Instance

Step 1) Download the software

Step 2) Prepare the OS environment

Step 3 ) Install the APM Server

Step 4) Test the access to console

Step 5) Install Linux Agent

#
# /etc/fstab
# Created by anaconda on Mon Nov  9 20:20:10 2015
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
UUID=379de64d-ea11-4f5b-ae6a-0aa50ff7b24d /                       xfs     defaults        0 0
/var/swap.1 swap swap defaults 0 0
UUID=379de64d-ea11-4f5b-ae6a-0aa50ff7b24d /tmp    tmpfs     defaults 0  2





# ./install.sh

Do you want to upgrade from an existing installation of the Performance Management server [ 1-yes or 2-no; "no" is default ]?

This script will install IBM Application Performance Management Advanced (8.1.3.0).

Do you want to continue [ 1-yes or 2-no; "yes" is default ]?

Do you want to change the default installation directory ( /opt/ibm ) [ 1-yes or 2-no; "no" is default ]?

Do you accept the license agreement(s) found in the /opt/software/APM/server/licenses/ipm_apm_advanced directory [ 1-accept or 2-decline ]?

License agreement was accepted, installation will proceed...

Do you want to change the default password for the administrator account [ 1-yes or 2-no; "no" is default ]?

Agent installation images must be configured to connect to this server. If you have downloaded the agent images to the same system, you can configure the agent images now.

Do you want to configure the compressed (*.zip or *.tar) agent installation files now [ 1-yes or 2-no; "yes" is default ]?

Enter the path to the directory where you downloaded the compressed agent (and/or Hybrid Gateway) installation images (e.g. /opt/software/APM/agent).
Enter the path or accept the default [/opt/software/APM/agent]:

Enter the path to the directory where configured agent installation images can be stored.
Enter the path or accept the default [/opt/ibm/ccm/depot]:

Enter the IP address/hostname that will be used by agents to communicate with the server.
Enter the IP address/hostname or accept the default [172.31.61.199]:

Enter the hostname and IP address of the server that will be used in a web browser to log in to the Performance Management console. You can accept the default values or provide your own.

Default values:

  Fully qualified domain name: ip-172-31-61-199.ec2.internal
  Short hostname: ip-172-31-61-199
  IP: 172.31.61.199

Do you want to use these values [ 1-yes or 2-no; "yes" is default ]?

Do you want to install the database or connect to an existing DB2? [ 1-install database or 2-connect to existing database; "1-install database" is default ]?

Running Prerequisite Scanner. This may take a few minutes depending on the number of checked components and machine's performance.
Setting Prerequisite Scanner output directory to user defined directory: /opt/ibm/ccm/logs/apm-prs_20160707_120418


Reading Prerequisite Scanner configuration files from user defined directory: /opt/ibm/ccm/logs/apm-prs_20160707_120418/config

IBM Prerequisite Scanner
     Version: 1.2.0.17
     Build  : 20150827
     OS name: Linux
   User name: root

 Machine Information
 Machine name: ip-172-31-61-199.ec2.internal
 Serial number:  ec214191-f7fd-600f-af67-faf3acd2c895


Scenario: Prerequisite Scan

IPDB2 - IBM Performance Management and IBM DB2 Server [version 08010300]:
Property                            Result    Found                                  Expected
========                            ======    =====                                  ========
db2.usersNotPresent                 WARN      db2apm,db2fenc1,dasusr1,itmuser        db2apm,db2fenc1,dasusr1,itmuser
db2.groupsNotPresent                WARN      db2iadm1,db2fadm1,dasadm1              db2iadm1,db2fadm1,dasadm1
os.isLDAPConfigured                 WARN      True                                   False


Overall result:   WARNING

Detailed results are also available in /opt/ibm/ccm/logs/apm-prs_20160707_120418/result.txt
The prerequisite check returned warnings. Installation can be continued, however is recommended to meet all above requirements.
Continue with this installation [ 1-yes or 2-no; "no" is default ]? 1

No further user input is required. The installation and configuration of components is now starting and may take up to one hour to complete. The installation log is available at "/opt/ibm/ccm/logs/apm-server-install_20160707_120418.log".

Installing DB2. Please wait...

Installing the Performance Management server. Please wait...
BDB5105 Verification of /var/lib/rpm/Packages succeeded.
BDB5105 Verification of /var/lib/rpm/Packages succeeded.
BDB5105 Verification of /var/lib/rpm/Packages succeeded.
BDB5105 Verification of /var/lib/rpm/Packages succeeded.

Starting components of the Performance Management server...
.....................
...
.................................

Configuring components of the Performance Management server...


All components are configured successfully.

Configuring agent installation images...
Agent installation images have been configured and are available in the following directory: /opt/ibm/ccm/depot.

The configuration of agent installation images can also be done manually.
To do this manual configuration, first create configuration packages by using the following script: /opt/ibm/ccm/make_configuration_packages.sh. Then, use the output packages from the first script and run the following one: /opt/ibm/ccm/configure_agent_images.sh.

Installer has detected existing keyfiles and/or agent configuration directories. They have been renamed to . If you want to use them to configure your server and/or agents please review the documentation.

Finalizing the installation...

The server size has been configured as 'small' based on the number of CPUs, amount of memory and free disk space. To reconfigure the server size, run script /opt/ibm/ccm/server_size.sh with the desired size as a parameter. Valid sizes are: extra_small, small, medium.
Please review the documentation at http://ibm.biz/mon_doc for more information.
To begin using the product, copy the configured agent images to the systems running the applications you want to monitor and install the agents. Log in to the Performance Management console using https://ip-172-31-61-199.ec2.internal:9443 and review the topics on the "Getting Started" page.

[root@ip-172-31-61-199 server]#



 ./make_configuration_packages.sh
./make_configuration_packages.sh: line 212: host: command not found
Enter the IP address/hostname that should be used by agents to communicate with the server.
Enter the IP address/hostname or accept the default [172.31.61.199]:

Configuration packages which contain the parameters for connecting to the server will be created and stored in a directory that you specify.
Enter the path to the directory where you want to store the configuration packages or accept the default value [/tmp/mkcustpkg_workdir.32514]: /opt/software/ccm/depot

Agents can connect to the server using secure https or unsecure http protocol.
Enter your choice [ 1-http, 2-https; "http" is default ]? ^C
[root@ip-172-31-61-199 ccm]# ./make_configuration_packages.sh
./make_configuration_packages.sh: line 212: host: command not found
Enter the IP address/hostname that should be used by agents to communicate with the server.
Enter the IP address/hostname or accept the default [172.31.61.199]:

Configuration packages which contain the parameters for connecting to the server will be created and stored in a directory that you specify.
Enter the path to the directory where you want to store the configuration packages or accept the default value [/tmp/mkcustpkg_workdir.5132]: /opt/ibm/ccm/depot

Agents can connect to the server using secure https or unsecure http protocol.
Enter your choice [ 1-http, 2-https; "http" is default ]?
Configuration packages are stored in the following directory: /opt/ibm/ccm/depot.
[root@ip-172-31-61-199 ccm]# cd /opt/ibm/ccm/depot/
[root@ip-172-31-61-199 depot]# ls
CNAR4ML_ipm_apm_adv_agents_xlinux_8.1.3.tar  onprem_config.tar  onprem_config.zip




]# ./configure_agent_images.sh

To pre-configure agent images or the Hybrid Gateway image, you must have created configuration packages with the parameters for connecting to the server. You either created the configuration packages as part of the installation of the infrastructure node or by using the make_configuration_packages.sh script on the system where the infrastructure node is installed.
Enter the path to the directory containing the configuration packages: /opt/software/APM/config_packageFiles

Agent installation images and the Hybrid Gateway image are pre-configured using this script.
Enter the path to directory where the images (compressed or uncompressed) are located: /opt/software/APM/agent

The configuration script copies the pre-configured images to another directory.
Enter the path to the directory where you want to store the pre-configured images or accept the default value /opt/ibm/ccm/depot:
Reading the file /opt/software/APM/agent/./CNAR4ML_ipm_apm_adv_agents_xlinux_8.1.3.tar
Writing the file /opt/ibm/ccm/depot/CNAR4ML_ipm_apm_adv_agents_xlinux_8.1.3.tar
Reading the file /opt/software/APM/agent/./CNAR5ML_ipm_apm_advanced_agents_win_8.1.3.zip
Writing the file /opt/ibm/ccm/depot/CNAR5ML_ipm_apm_advanced_agents_win_8.1.3.zip
Agent images are pre-configured and available in /opt/ibm/ccm/depot.

Wednesday, July 6, 2016

Copying files from S3 into EC2 Instance

# curl "https://s3.amazonaws.com/aws-cli/awscli-bundle.zip" -o "awscli-bundle.zip"
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100 6876k  100 6876k    0     0  30.2M      0 --:--:-- --:--:-- --:--:-- 30.3M


# ls
awscli-bundle.zip

# unzip awscli-bundle.zip
bash: unzip: command not found





# yum install unzip

# unzip awscli-bundle.zip
Archive:  awscli-bundle.zip

# cd awscli-bundle

# ./install -i /usr/local/aws -b /usr/local/bin/aws
Running cmd: /bin/python virtualenv.py --python /bin/python /usr/local/aws
Running cmd: /usr/local/aws/bin/pip install --no-index --find-links file:///opt/software/awscli-bundle/packages awscli-1.10.45.tar.gz
You can now run: /usr/local/bin/aws --version


# /usr/local/bin/aws --version
aws-cli/1.10.45 Python/2.7.5 Linux/3.10.0-327.el7.x86_64 botocore/1.4.35

Add aws path to the /etc/profile
export PATH=$PATH:/usr/local/bin

# aws configure
AWS Access Key ID [None]: <access_key>
AWS Secret Access Key [None]: <secret_access_key>
Default region name [None]: us-east-1
Default output format [None]:

# aws s3 cp s3://apmsoftware/CNAR3ML_ipm_apm_advanced_8.1.3.tar .

The aws s3 cp command is similar to the Unix cp command (the syntax is: aws s3 cp source destination).




Importing VM Image to Amazon EC2 as an Image

The steps documented here are on how to import vmware images into AWS as AMI images.
For importing the vmware images as EC2 instance, follow the documentation on amazon site.

Also this document is command line approach using the AWS and EC2 API.

Follow the documentation link on AWS site
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instances_of_your_vm.html

Figure below details the process and steps
VM Import/Export Architecture


Step 1) Install the AWS CLI. 
Step 2) Prepare the VM for import to Amazon EC2. 
Step 3) Export the VM from the virtualization environment. 
Step 4) Import the VM into Amazon EC2
Step 5) Launch the instance in Amazon EC2

----------------  STEP 1 ---------------------
Download and Install the AWS CLI Tools
(aws-cli  & ec2)


# unzip awscli-bundle.zip
Archive:  awscli-bundle.zip

# cd awscli-bundle

# ./install -i /usr/local/aws -b /usr/local/bin/aws
Running cmd: /bin/python virtualenv.py --python /bin/python /usr/local/aws
Running cmd: /usr/local/aws/bin/pip install --no-index --find-links file:///opt/software/awscli-bundle/packages awscli-1.10.45.tar.gz
You can now run: /usr/local/bin/aws --version


# /usr/local/bin/aws --version
aws-cli/1.10.45 Python/2.7.5 Linux/3.10.0-327.el7.x86_64 botocore/1.4.35


# aws configure
AWS Access Key ID [None]: <access_key>
AWS Secret Access Key [None]: <secret_access_key>
Default region name [None]: us-east-1
Default output format [None]:

(OR Follow the below method for aws-cli)

For installing pip, check this url
http://docs.aws.amazon.com/cli/latest/userguide/installing.html

[root@vm01 ~]# /usr/local/aws/bin/pip install awscli --ignore-installed six
You are using pip version 7.0.3, however version 8.1.2 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
Collecting awscli
/usr/local/aws/lib/python2.6/site-packages/pip/_vendor/requests/packages/urllib3                                                      /util/ssl_.py:90: InsecurePlatformWarning: A true SSLContext object is not avail                                                      able. This prevents urllib3 from configuring SSL appropriately and may cause cer                                                      tain SSL connections to fail. For more information, see https://urllib3.readthed                                                      ocs.org/en/latest/security.html#insecureplatformwarning.
  InsecurePlatformWarning
  Downloading awscli-1.10.44-py2.py3-none-any.whl (970kB)
    100% |████████████████████████████████| 970kB 93kB/s
Collecting six
  Downloading six-1.10.0-py2.py3-none-any.whl
Collecting s3transfer==0.0.1 (from awscli)
  Downloading s3transfer-0.0.1-py2.py3-none-any.whl
Collecting colorama<=0.3.3,>=0.2.5 (from awscli)
  Downloading colorama-0.3.3.tar.gz
Collecting botocore==1.4.34 (from awscli)
  Downloading botocore-1.4.34-py2.py3-none-any.whl (2.4MB)
    100% |████████████████████████████████| 2.4MB 55kB/s
Collecting argparse>=1.1 (from awscli)
  Downloading argparse-1.4.0-py2.py3-none-any.whl
Collecting rsa<=3.5.0,>=3.1.2 (from awscli)
  Downloading rsa-3.4.2-py2.py3-none-any.whl (46kB)
    100% |████████████████████████████████| 49kB 3.5MB/s
Collecting docutils>=0.10 (from awscli)
  Downloading docutils-0.12.tar.gz (1.6MB)
    100% |████████████████████████████████| 1.6MB 143kB/s
Collecting futures<4.0.0,>=2.2.0 (from s3transfer==0.0.1->awscli)
  Downloading futures-3.0.5-py2-none-any.whl
Collecting ordereddict==1.1 (from botocore==1.4.34->awscli)
  Downloading ordereddict-1.1.tar.gz
Collecting jmespath<1.0.0,>=0.7.1 (from botocore==1.4.34->awscli)
  Downloading jmespath-0.9.0-py2.py3-none-any.whl
Collecting simplejson==3.3.0 (from botocore==1.4.34->awscli)
  Downloading simplejson-3.3.0.tar.gz (67kB)
    100% |████████████████████████████████| 69kB 532kB/s
Collecting python-dateutil<3.0.0,>=2.1 (from botocore==1.4.34->awscli)
  Downloading python_dateutil-2.5.3-py2.py3-none-any.whl (201kB)
    100% |████████████████████████████████| 204kB 404kB/s
Collecting pyasn1>=0.1.3 (from rsa<=3.5.0,>=3.1.2->awscli)
  Downloading pyasn1-0.1.9-py2.py3-none-any.whl
Building wheels for collected packages: colorama, docutils, ordereddict, simplejson
  Running setup.py bdist_wheel for colorama
  Stored in directory: /root/.cache/pip/wheels/21/c5/cf/63fb92293f3ad402644ccaf882903cacdb8fe87c80b62c84df
  Running setup.py bdist_wheel for docutils
  Stored in directory: /root/.cache/pip/wheels/db/de/bd/b99b1e12d321fbc950766c58894c6576b1a73ae3131b29a151
  Running setup.py bdist_wheel for ordereddict
  Stored in directory: /root/.cache/pip/wheels/cf/2c/b5/a1bfd8848f7861c1588f1a2dfe88c11cf3ab5073ab7af08bc9
  Running setup.py bdist_wheel for simplejson
  Stored in directory: /root/.cache/pip/wheels/5a/a5/b9/b0c89f0c5c40e2090601173e9b49091d41227c6377020e4e68
Successfully built colorama docutils ordereddict simplejson
Installing collected packages: futures, ordereddict, jmespath, simplejson, six, python-dateutil, docutils, botocore, s3transfer, colorama, argparse, pyasn1, rsa, awscli
Successfully installed argparse-1.3.0 awscli-1.10.44 botocore-1.4.33 colorama-0.3.3 docutils-0.12 futures-3.0.5 jmespath-0.9.0 ordereddict-1.1 pyasn1-0.1.9 python-dateutil-2.5.3 rsa-3.4.2 s3transfer-0.0.1 simplejson-3.3.0 six-1.10.0
[root@vm01 ~]#


For installing ec2 cli tools

wget http://s3.amazonaws.com/ec2-downloads/ec2-api-tools.zip

mkdir /usr/local/ec2  
unzip ec2-api-tools.zip -d /usr/local/ec2

set JAVA_HOME env variable
$ which java
/usr/bin/java

The which java command executed earlier returns Java's location in the $PATH environment variable, but in most cases this is a symbolic link. The JAVA_HOME environment variable requires the true path to the binary.

(Linux only) For Linux systems, you can recursively run the file command on the which java output until you find the binary. For example:
 
file $(which java)
/usr/bin/java: symbolic link to `/etc/alternatives/java'

The /usr/bin/java location is actually a link to /etc/alternatives/java, so you need to run the file command on that location to see whether that is the real binary.
 
file /etc/alternatives/java 
/etc/alternatives/java: symbolic link to `/usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java'

This returns a new location, which is the actual binary. Verify this by running the file command on this location:
 
file /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
/usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java: ELF 64-bit LSB executable...

This location is the actual binary (notice that it is listed as an executable). The Java home directory is where bin/java lives; in this example, the Java home directory is /usr/lib/jvm/java-7-openjdk-amd64/jre.


export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
$JAVA_HOME/bin/java -version

export EC2_HOME=/usr/local/ec2/ec2-api-tools-1.7.0.0
export PATH=$PATH:$EC2_HOME/bin 
export AWS_ACCESS_KEY=your-aws-access-key-id  
export AWS_SECRET_KEY=your-aws-secret-key
  
Verify the tool is working

# ec2-describe-regions
REGION  ap-south-1      ec2.ap-south-1.amazonaws.com
REGION  eu-west-1       ec2.eu-west-1.amazonaws.com
REGION  ap-southeast-1  ec2.ap-southeast-1.amazonaws.com
REGION  ap-southeast-2  ec2.ap-southeast-2.amazonaws.com
REGION  eu-central-1    ec2.eu-central-1.amazonaws.com
REGION  ap-northeast-2  ec2.ap-northeast-2.amazonaws.com
REGION  ap-northeast-1  ec2.ap-northeast-1.amazonaws.com
REGION  us-east-1       ec2.us-east-1.amazonaws.com
REGION  sa-east-1       ec2.sa-east-1.amazonaws.com
REGION  us-west-1       ec2.us-west-1.amazonaws.com
REGION  us-west-2       ec2.us-west-2.amazonaws.com

----------------  STEP 2 ---------------------
Prepare the VM for import to Amazon EC2

1) login to your vm and create a user that will be used for remote access
# adduser netcool

2) Install the application required for remote access.
# apt-get install openssh-server sudo

3) Set sudo permissions for the user created in step 1.

# vi /etc/sudoers

add a new line under the #User privilege specification section
netcool ALL=(ALL:ALL) ALL

4) Configure the vm to use DHCP
# echo > /etc/network/interfaces
# vi /etc/network/interfaces
 add
iface eth0 inet dhcp

# shutdown -h now

5)
 

------- STEP 3:  Export the VM from the virtualization environment. -----------


Use the vmware export tools to generate the OVA or VMDK files.

 for ova from vsphere client

File - Export





------- Step 4) Import the VM into Amazon EC2 -----------

Copy the imported ova image file to S3 bucket

Configure IAM and roles

- Create a new role called vmimport
- Select "Amazon EC2" Role Type
- Attach policy "AdministratorAccess"
- Click the Trust Relationships tab, choose Edit, and modify the policy to look as below.
Change the Service name to vmie.amazonaws.com


{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "",
      "Effect": "Allow",
      "Principal": {
        "Service": "vmie.amazonaws.com"
      },
      "Action": "sts:AssumeRole",
      "Condition": {
        "StringEquals": {
          "sts:ExternalId": "vmimport"
        }
      }

    }
  ]
}



Run the following command to import the image into AWS.


aws ec2 import-image --description "Grafana Linux OVA Image" --disk-containers file://containers.json

containers.json defines the import options: the image format, the S3 bucket name, and the S3 key (the file name of the uploaded image). For example:

[{
    "Description": "Grafana Image",
    "Format": "ova",
    "UserBucket": {
        "S3Bucket": "grafanaimage",
        "S3Key": "Public_Grafana.ova"
    }
}]


Use the below command to check the status of the image conversion.

aws ec2 describe-import-image-tasks --import-task-ids "import-ami-fgdn6shf"


Once the image is converted, the AMI image for AWS can be found under EC2 Service Dashboard. Select Images->AMIs to see your converted image.


ec2-import-instance "<path-to-vm-image>" -t <instance type> -f <format> -a <architecture> -b <s3 bucket> -o <Access Key ID> -w <Secret Access Key>
 
 
 
 # ec2-import-instance Predict-disk1.vmdk -f VMDK -t m3.xlarge -a x86_64 -b predictive-insights-files -o <your-aws-access-key-id> -w <your-aws-secret-key> -p Linux
 

Linux is not supported on the requested instance

Cause: Linux import is only supported on specific instance types. You attempted to import an unsupported instance type.
Resolution: Retry using one of the supported instance types. Microsoft Windows BYOL instances must be launched as dedicated instances or on dedicated hosts for Microsoft Windows, and therefore cannot use the t2 instance type because it doesn't support dedicated instances.
  • General purpose: t2.micro | t2.small | t2.medium | m3.medium | m3.large | m3.xlarge | m3.2xlarge
  • Compute optimized: c3.large | c3.xlarge | c3.2xlarge | c3.4xlarge | c3.8xlarge | cc1.4xlarge
  • Memory optimized: r3.large | r3.xlarge | r3.2xlarge | r3.4xlarge | r3.8xlarge
  • Storage optimized: i2.xlarge | i2.2xlarge | i2.4xlarge | i2.8xlarge


 For troubleshooting the vm import/export, follow this link
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/VMImportTroubleshooting.html#LinuxNotSupported
 
 

Friday, July 1, 2016

Configuring and Working with Cloudwatch Logs

Below details how to setup Cloudwatch log agent on the EC2 instances

# yum update -y

#yum install -y awslogs (this command may not work on RHEL/CentOS; use the steps below instead)

# curl https://s3.amazonaws.com/aws-cloudwatch/downloads/latest/awslogs-agent-setup.py -O
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100 47998  100 47998    0     0   419k      0 --:--:-- --:--:-- --:--:--  422k

# ls
awslogs-agent-setup.py

# python ./awslogs-agent-setup.py  --region us-east-1
Launching interactive setup of CloudWatch Logs agent ...

Step 1 of 5: Installing pip ...DONE
Step 2 of 5: Downloading the latest CloudWatch Logs agent bits ... DONE
Step 3 of 5: Configuring AWS CLI ...
AWS Access Key ID [None]: <aws-access-key>
AWS Secret Access Key [None]:<aws-secret-access-key>
Default region name [us-east-1]:
Default output format [None]:

Step 4 of 5: Configuring the CloudWatch Logs Agent ...
Path of log file to upload [/var/log/messages]:
Destination Log Group name [/var/log/messages]:

Choose Log Stream name:
  1. Use EC2 instance id.
  2. Use hostname.
  3. Custom.
Enter choice [1]:

Choose Log Event timestamp format:
  1. %b %d %H:%M:%S    (Dec 31 23:59:59)
  2. %d/%b/%Y:%H:%M:%S (10/Oct/2000:13:55:36)
  3. %Y-%m-%d %H:%M:%S (2008-09-08 11:52:54)
  4. Custom
Enter choice [1]: 3

Choose initial position of upload:
  1. From start of file.
  2. From end of file.
Enter choice [1]: 1
More log files to configure? [Y]: n

Step 5 of 5: Setting up agent as a daemon ...DONE


------------------------------------------------------
- Configuration file successfully saved at: /var/awslogs/etc/awslogs.conf
- You can begin accessing new log events after a few moments at https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logs:
- You can use 'sudo service awslogs start|stop|status|restart' to control the daemon.
- To see diagnostic information for the CloudWatch Logs Agent, see /var/log/awslogs.log
- You can rerun interactive setup using 'sudo python ./awslogs-agent-setup.py --region us-east-1 --only-generate-config'
------------------------------------------------------
#

Check your cloudwatch dashboard to see if any logs are coming in your stream.


Now, how to collect the logs from cloudwatch. On your local system or desktop, install awslogs from github

https://github.com/jorgebastida/awslogs

# pip install awslogs
/usr/local/aws/lib/python2.6/site-packages/pip/_vendor/requests/packages/urllib3/util/ssl_.py:90: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.
  InsecurePlatformWarning
You are using pip version 7.0.3, however version 8.1.2 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
Collecting awslogs
/usr/local/aws/lib/python2.6/site-packages/pip/_vendor/requests/packages/urllib3/util/ssl_.py:90: InsecurePlatformWarning: A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail. For more information, see https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning.
  InsecurePlatformWarning
  Downloading awslogs-0.5.0.tar.gz
Collecting boto3>=1.2.1 (from awslogs)
  Downloading boto3-1.3.1-py2.py3-none-any.whl (113kB)
    100% |████████████████████████████████| 114kB 2.8MB/s
Collecting termcolor>=1.1.0 (from awslogs)
  Downloading termcolor-1.1.0.tar.gz
Requirement already satisfied (use --upgrade to upgrade): python-dateutil>=2.4.0 in /usr/local/aws/lib/python2.6/site-packages (from awslogs)
Requirement already satisfied (use --upgrade to upgrade): argparse>=1.1.0 in /usr/local/aws/lib/python2.6/site-packages (from awslogs)
Requirement already satisfied (use --upgrade to upgrade): futures<4.0.0,>=2.2.0 in /usr/local/aws/lib/python2.6/site-packages (from boto3>=1.2.1->awslogs)
Requirement already satisfied (use --upgrade to upgrade): jmespath<1.0.0,>=0.7.1 in /usr/local/aws/lib/python2.6/site-packages (from boto3>=1.2.1->awslogs)
Requirement already satisfied (use --upgrade to upgrade): botocore<1.5.0,>=1.4.1 in /usr/local/aws/lib/python2.6/site-packages (from boto3>=1.2.1->awslogs)
Requirement already satisfied (use --upgrade to upgrade): six>=1.5 in /usr/local/aws/lib/python2.6/site-packages (from python-dateutil>=2.4.0->awslogs)
Requirement already satisfied (use --upgrade to upgrade): ordereddict==1.1 in /usr/local/aws/lib/python2.6/site-packages (from botocore<1.5.0,>=1.4.1->boto3>=1.2.1->awslogs)
Requirement already satisfied (use --upgrade to upgrade): simplejson==3.3.0 in /usr/local/aws/lib/python2.6/site-packages (from botocore<1.5.0,>=1.4.1->boto3>=1.2.1->awslogs)
Requirement already satisfied (use --upgrade to upgrade): docutils>=0.10 in /usr/local/aws/lib/python2.6/site-packages (from botocore<1.5.0,>=1.4.1->boto3>=1.2.1->awslogs)
Building wheels for collected packages: awslogs, termcolor
  Running setup.py bdist_wheel for awslogs
  Stored in directory: /root/.cache/pip/wheels/21/ca/1d/09b0bc28e47edd432789b3670bb7cd0d116ccfa54a83ab42a5
  Running setup.py bdist_wheel for termcolor
  Stored in directory: /root/.cache/pip/wheels/de/f7/bf/1bcac7bf30549e6a4957382e2ecab04c88e513117207067b03
Successfully built awslogs termcolor
Installing collected packages: boto3, termcolor, awslogs
Successfully installed awslogs-0.5.0 boto3-1.3.1 termcolor-1.1.0


Once the tool is installed. run the below commands to get the output from cloudwatch.

awslogs get /var/log/messages --start='1h ago' | grep xenbus
awslogs get /var/log/messages --start='45 minutes ago'



awslogs get /var/log/messages --start='2 days ago' --no-color (use this to avoid ANSI color/escape codes when redirecting the output to another file)


aws logs get-log-events --log-group-name /var/log/messages --log-stream-name i-097fb8cbe37dfb658 --output text 
Have Fun.

Configuring and Working with AWS Cloudwatch CLI

AWS documentation for reference
http://docs.aws.amazon.com/AmazonCloudWatch/latest/cli/SetupCLI.html

1) Download CloudWatch-2010-08-01.zip and unzip in your AWS folder
2) Configure the credentials file. the template is in the unzip folder

# cd /opt/AWS/CloudWatch-1.0.20.0
# cat credential-file-path.template
AWSAccessKeyId=<access_key>
AWSSecretKey=<access_secret_key>



3) Configure /etc/profile and set the env variables as below
export AWS_CLOUDWATCH_HOME=/opt/AWS/CloudWatch-1.0.20.0
export PATH=$PATH:$AWS_CLOUDWATCH_HOME/bin
export AWS_CREDENTIAL_FILE=$AWS_CLOUDWATCH_HOME/credential-file-path.template
export AWS_CLOUDWATCH_URL=http://monitoring.us-east-1.amazonaws.com
export JAVA_HOME=/usr


You will need the actual JAVA_HOME. To find the java folder,
# which java
/bin/java

# cd /bin/
# ls -lt java
lrwxrwxrwx. 1 root root 22 Jul 12 10:49 java -> /etc/alternatives/java

# cd /etc/alternatives/
# ls -lt java
lrwxrwxrwx. 1 root root 72 Jul 12 10:49 java -> /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.91-1.b14.el7_2.x86_64/jre/bin/java

Add the JRE folder path to your JAVA_HOME environment variable
# env | grep JAVA
JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.91-1.b14.el7_2.x86_64/jre



4) run the command to test your env is working or not.
# # mon-cmd
Command Name                       Description
------------                       -----------
help
mon-delete-alarms                  Delete alarms
mon-describe-alarm-history         Describe alarm history
mon-describe-alarms                Describe alarms fully.
mon-describe-alarms-for-metric     Describe all alarms associated with a single metric
mon-disable-alarm-actions          Disable all actions for a given alarm
mon-enable-alarm-actions           Enable all actions for a given alarm
mon-get-stats                      Get metric statistics
mon-list-metrics                   List user's metrics
mon-put-data                       Put metric data
mon-put-metric-alarm               Create a new alarm or update an existing one
mon-set-alarm-state                Manually set the state of an alarm
version                            Prints the version of the CLI tool and the API.

    For help on a specific command, type '<commandname> --help'


# mon-get-stats CPUUtilization --start-time 2016-06-30T10:00:00.000Z --end-time 2016-06-30T21:25:00.000Z --period 60 --statistics "Average,Minimum,Maximum,SampleCount" --namespace "AWS/EC2" --dimensions "InstanceId=i-0ea22089a33d69cdd"

# mon-get-stats CPUUtilization --start-time 2016-06-30T18:00:00.000Z --end-time 2016-06-30T18:30:00.000Z --statistics "Average,Minimum,Maximum,SampleCount" --namespace "AWS/EC2" --dimensions "InstanceId=i-0f6578719a55264ee" --show-table --headers --delimiter "|" --period 600


sample output with headers
Time Samples Average Unit
2013-05-19 00:03:00 2.0 0.19 Percent

Output

This command returns a table that contains the following:
  • Time - Time the metrics were taken.
  • SampleCount - Number of data points used for the statistical calculation.
  • Average - Average value.
  • Sum - Sum of values.
  • Minimum - Minimum observed value.
  • Maximum - Maximum observed value.
  • Unit - Unit of the metric.
The Amazon CloudWatch CLI displays errors on stderr.


Different AWS CW Metrics for EC2
  • CPUCreditUsage
  • CPUCreditBalance
  • CPUUtilization
  • DiskReadOps
  • DiskWriteOps
  • DiskReadBytes
  • DiskWriteBytes
  • NetworkIn
  • NetworkOut
  • NetworkPacketsIn
  • NetworkPacketsOut
  • StatusCheckFailed
  • StatusCheckFailed_Instance
  • StatusCheckFailed_System

If you need instance id for automating a code. below is the command to use

# aws ec2 describe-instances | grep InstanceId | awk '{ print $2 }'
"i-0ea22089a33d69cdd",
"i-0f6578719a55264ee",
"i-097fb8cbe37dfb658",