
Netflow, IPFIX, and Elasticsearch

The Flow Analyzer project integrates your Netflow v5, Netflow v9, and IPFIX flow data with Elasticsearch and allows you to graph and dashboard it in Kibana.

It's a full-stack solution with custom collectors written in Python, additional flow tagging and categorization logic, and storage in Elasticsearch.

Once your flow data is tagged, categorized, and stored it can be visualized in Kibana.

Prerequisites:

Fresh Ubuntu 20.04.1 installation
4 GB RAM
100 GB hard disk

sudo passwd root
apt -y update && apt -y upgrade

# ******************* #
# *** PASTE BREAK *** #
# ******************* #
# Set email and host identity
ADMIN_EMAIL=youremail
ADMIN_NAME="netflow"
MAILTO=$ADMIN_EMAIL
HOSTNAME=netflow
DOMAINNAME=yourdomain
TIME_ZONE="Asia/Kuala_Lumpur"
LOCATION="Kuala_Lumpur"

PHP_VERSION=7.4
PHP_VERSION_SHORT=74

rm /etc/localtime; ln -s /usr/share/zoneinfo/$TIME_ZONE /etc/localtime
ls -la /etc/localtime

echo "
# Kernel hostname
kernel.hostname=$HOSTNAME.$DOMAINNAME
# Kernel domain name
kernel.domainname=$DOMAINNAME" >> /etc/sysctl.conf

echo -e "127.0.0.1\t$HOSTNAME.$DOMAINNAME $HOSTNAME" >> /etc/hosts
echo -e "::1\t$HOSTNAME.$DOMAINNAME $HOSTNAME" >> /etc/hosts

sudo hostnamectl set-hostname $HOSTNAME
sysctl -p /etc/sysctl.conf
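
# To confirm the new hostname and FQDN took effect:
hostnamectl
hostname -f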

 

sudo apt install apt-transport-https ca-certificates curl software-properties-common -y
# ******************* #
# *** PASTE BREAK *** #
# ******************* #
#######################################################
# SSH config improvements for logins #
#######################################################

# Create the SSH directory for the current user
mkdir -p ~/.ssh

# Backup sshd settings
cp /etc/ssh/sshd_config /etc/ssh/sshd_config.factory-defaults

sed -i 's/GSSAPIAuthentication yes/GSSAPIAuthentication no/' /etc/ssh/ssh_config
# Skip reverse DNS lookups so logins aren't delayed by slow DNS
echo 'UseDNS no' >> /etc/ssh/sshd_config

# allow ssh root login
sudo sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config

 

# Enable root file edit for WinSCP (append via tee so the redirect itself runs with privileges)
echo '
#### added
sysadmin ALL=NOPASSWD: /usr/lib/openssh/sftp-server' | sudo tee -a /etc/sudoers
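
# Since sshd_config and /etc/sudoers were edited by hand, it's worth validating
# both before restarting (sshd -t checks the SSH config, visudo -c the sudoers file):
sudo sshd -t
sudo visudo -c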

sudo service ssh restart
# ******************* #
# *** PASTE BREAK *** #
# ******************* #
#######################################################
# file limits #
#######################################################

## setup high volume file access mode
ulimit -n 131070

## add the lines below to /etc/security/limits.conf
## note: on Ubuntu the Apache user is www-data; adjust the entries to match your service accounts
echo -e "root\tsoft\tnofile\t120000
root\thard\tnofile\t300000
mysql\tsoft\tnofile\t120000
mysql\thard\tnofile\t300000
apache\tsoft\tnofile\t120000
apache\thard\tnofile\t300000
nginx\tsoft\tnofile\t120000
nginx\thard\tnofile\t300000" >> /etc/security/limits.conf
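
# limits.conf is applied at login; reconnect (e.g. open a fresh SSH session) and
# verify the new open-file limit is in effect:
ulimit -n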

#######################################################
# install python, python-dev python2 #
#######################################################

 

# Ubuntu 20.04 has no plain 'python' package; install Python 2 explicitly
apt install python2
# get-pip.py at the default URL no longer supports Python 2; use the pinned 2.7 copy
curl https://bootstrap.pypa.io/pip/2.7/get-pip.py --output get-pip.py
sudo python2 get-pip.py

 

sudo apt install python3-pip

 

# Point the unversioned python/python-dev names at Python 2 OR Python 3.
# The two packages conflict with each other, so install only one:
apt install python-dev-is-python2
# ...or, if you standardize on Python 3:
#apt install python-dev-is-python3
#confirm the interpreter responds (pydoc prints its usage when run with no arguments)
pydoc
#check the pip version
pip --version
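
# A quick sanity check that both interpreters and their pip installs are usable:
python2 -m pip --version
python3 -m pip --version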

 

#######################################################
# install git and clone repo of flowanalyzer #
#######################################################
sudo apt-get install git

 

#Clone the Git repository:
git clone https://gitlab.com/thart/flowanalyzer.git
#The download should only take a moment. Move into the repo directory:
cd flowanalyzer

 

#The ubuntu_install_elk5.sh script handles almost everything, just be sure to run it with sudo privileges:
sudo sh ./Install/ubuntu_install_elk5.sh

 

#Build Elasticsearch Indexes
#Index templates need to be created in Elasticsearch, so when data is collected the fields will be assigned the right data types and proper indexing settings.

 

#Netflow Index
#The build_netflow_index_elk5.sh script creates the default index for storing Netflow and IPFIX flow data in Elasticsearch:
sh ./Install/build_netflow_index_elk5.sh

 

#This should output the following message, indicating the template has been created:
{
  "acknowledged" : true
}
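
# Optionally confirm the template landed in Elasticsearch; this lists all index
# templates (the exact template name depends on the install script):
curl -s 'localhost:9200/_template?pretty'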

 

#sFlow Index
#Run the sFlow index script:
sh ./Install/build_sflow_index_elk5.sh
#This should output the following message, indicating the template has been created:
{
  "acknowledged" : true
}

 

#These are examples of commands you may need to use if you’re running a firewall on the Ubuntu Server installation:

 

#firewall
#ufw allow from xxx.xxx.xxx.xxx/xx to any port 80 proto tcp comment "Kibana interface"
#ufw allow from xxx.xxx.xxx.xxx/xx to any port 9200 proto tcp comment "Elasticsearch CLI"
#ufw allow from xxx.xxx.xxx.xxx/xx to any port 2055,9995,4739,6343 proto udp comment "Flow data in"
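
#A filled-in example (the 192.168.1.0/24 subnet is hypothetical; substitute your management network):
#sudo ufw allow from 192.168.1.0/24 to any port 80 proto tcp comment "Kibana interface"
#sudo ufw allow from 192.168.1.0/24 to any port 2055,9995,4739,6343 proto udp comment "Flow data in"
#sudo ufw status numbered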

 

#It’s important to reboot so that we’re sure the services were registered and start correctly:
sudo reboot

 

#Once the Ubuntu instance comes back up verify that the services have started:
systemctl status elasticsearch
systemctl status kibana
systemctl status netflow_v5
systemctl status netflow_v9
systemctl status ipfix
systemctl status sflow
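
# A compact way to check all six services at once (a sketch using systemctl is-active):
for svc in elasticsearch kibana netflow_v5 netflow_v9 ipfix sflow; do
  printf '%-14s %s\n' "$svc" "$(systemctl is-active "$svc")"
done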

 

#Disable Unused Collector Services
#sudo systemctl stop sflow
#sudo systemctl disable sflow
#sudo systemctl stop ipfix
#sudo systemctl disable ipfix

 

#Configure Devices (the examples below use MikroTik RouterOS syntax)
#Netflow version 5
/ip traffic-flow set enabled=yes interfaces=ether1
/ip traffic-flow target add dst-address=yourhostip port=2055 version=5

 

#Netflow version 9
/ip traffic-flow set enabled=yes interfaces=ether1
/ip traffic-flow target add dst-address=yourhostip port=9995 version=9
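
#To confirm flows are actually arriving, you can watch the collector ports on the
#Ubuntu host with tcpdump (port 2055 shown for Netflow v5; use 9995 for v9):
sudo tcpdump -n -i any -c 5 udp port 2055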

 

#Service / Protocol / Port Information
Service     Protocol  Port  Description
Netflow v5  UDP       2055  Basic flow monitoring
Netflow v9  UDP       9995  Intermediate flow monitoring
IPFIX       UDP       4739  Advanced flow monitoring
sFlow       UDP       6343  Advanced flow and performance monitoring
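
#To verify the collectors are listening on these ports on the server:
sudo ss -lunp | grep -E '2055|9995|4739|6343'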
Access Kibana
#There are a couple of ways to access Kibana, depending on whether you're using the reverse proxy for authentication.

 

#No Reverse Proxy (default):
Browse to Kibana at http://your_server_ip:5601

 

#Using a Reverse Proxy (optional, only if configured):
Browse to Kibana at http://your_server_ip
#Log in with the default Squid credentials you created during the Squid configuration.

 

Configure Index Patterns
#On first logging in to Kibana you'll notice an orange warning message that states:
# "No default index pattern. You must select or create one to continue."
#The Netflow and sFlow index scripts already created the index templates, and your devices should already be sending flows (which triggers index creation), but we need to point Kibana in the right direction.

#Netflow Index Pattern
#Use the following steps to point Kibana to the Netflow index:
# -Click Index Patterns
# -Click the Add New button in the upper-left if you aren't already on the Configure an index pattern page
# -Under Index name or pattern enter flow*
# -Click outside the entry field and it should automatically parse your input, revealing more information below

 

#Note: If you haven't already configured your devices to send flows to the collector, go back and perform that configuration, because the following steps won't work.

 

#Leave the automatically selected Time field under Time-field name.
#Click the Create button.
#You will be taken to the flow* index pattern.
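
#If the flow* pattern doesn't resolve, confirm that flow indices actually exist in Elasticsearch:
curl -s 'localhost:9200/_cat/indices/flow*?v'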
#sFlow Index Pattern
#Use the following steps to point Kibana to the sFlow index:
# -Click Index Patterns
# -Click the Add New button in the upper-left if you aren't already on the Configure an index pattern page
# -Under Index name or pattern enter sflow*
# -Click outside the entry field and it should automatically parse your input, revealing more information below
#Note: If you haven't already configured your devices to send flows to the collector, go back and perform that configuration, because the following steps won't work.
#Leave the automatically selected Time field under Time-field name.
#Click the Create button.
#You will be taken to the sflow* index pattern.

 

Import Kibana Visualizations and Dashboards
#Download export.json from the Kibana directory of the flowanalyzer repository.
#In Kibana perform the following steps:
#Click Management on the left navigation bar
#Click Saved Objects at the top of the window
#Click the Import button
#Browse to the downloaded JSON file

 

Update
#To get the latest updates do the following:
#Change to the flowanalyzer directory and pull the latest stable code from the master branch:
cd ~/flowanalyzer
git pull
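#Optionally confirm what you pulled (shows the latest commit on the current branch):
git log -1 --oneline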

 

#Restart the collector services you are actively using on your network (all are listed here for documentation purposes):
systemctl restart netflow_v5
systemctl restart netflow_v9
systemctl restart ipfix
systemctl restart sflow

Elasticsearch Clustering
#Elasticsearch works best in a cluster, and as your Elasticsearch cluster grows you’ll get better performance and more storage. The default installation creates one instance of Elasticsearch, which is fine for testing or small organizations, but you’ll get the best performance from two or three (or more!) instances of Elasticsearch. As you sample more traffic from more devices you can grow the cluster over time.
#Fortunately, almost everything you need is included in the default installation script; you just need to tweak a few options and Elasticsearch does the rest automatically.
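
#A minimal sketch of the clustering options in /etc/elasticsearch/elasticsearch.yml,
#assuming Elasticsearch 5.x zen discovery and two example nodes (names and IPs below
#are placeholders, not values from the install script):
# cluster.name: flowanalyzer
# node.name: es-node-1
# network.host: 0.0.0.0
# discovery.zen.ping.unicast.hosts: ["10.0.0.11", "10.0.0.12"]
# discovery.zen.minimum_master_nodes: 2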
##################################################
# Debugging, logs, and troubleshooting #
##################################################
#Collector Logs
#The collectors' logs are handled by systemd and are easily viewable through the journalctl command:
journalctl -u netflow_v5
journalctl -u netflow_v9
journalctl -u ipfix
journalctl -u sflow
#Using the --follow option you can have journalctl scroll through the log as it's updated. Using the netflow_v5 service as an example:
journalctl -u netflow_v5 --follow

 

Elasticsearch Status
#If the Elasticsearch service isn’t running in the background it’s impossible to store flow data.
#Check the status of the Elasticsearch service and its logs to verify it’s online and healthy:
systemctl status elasticsearch
journalctl -u elasticsearch
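
#Cluster health gives a quick green/yellow/red summary of index and shard state:
curl -s 'localhost:9200/_cluster/health?pretty'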

 

Kibana Status
#If the Kibana service isn’t running in the background you won’t be able to visualize and dashboard data in Elasticsearch.
#Check the status of the Kibana service and its logs to verify it’s online and healthy:
systemctl status kibana
journalctl -u kibana

 

Squid Status
#Note: This is an optional feature. If you’re using Squid to put authentication in front of Kibana verify Squid is running. Check the status of the Squid service and its logs to verify it’s online and healthy:
systemctl status squid
journalctl -u squid

 

Launch Python Collector
#First, stop the collector service already running in the background (e.g. Netflow v9) so the port will be available:
sudo systemctl stop netflow_v9
#Change directory to where the Flow Analyzer is installed, then into the Python directory:
cd /your/directory/flowanalyzer/Python
#Run the collector Python file using the -l or --log option to set verbosity:
python netflow_v9.py -l info
#or...
python netflow_v9.py --log=debug
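
#When you're done debugging, stop the foreground collector with Ctrl+C and bring the
#service back up:
sudo systemctl start netflow_v9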