#######################################################
# install python, python-dev, python2                 #
#######################################################
apt install python
apt install python2
#Note: the top-level get-pip.py no longer supports Python 2; use the 2.7-pinned script:
curl https://bootstrap.pypa.io/pip/2.7/get-pip.py --output get-pip.py
sudo python2 get-pip.py
sudo apt install python3-pip
#Note: these two packages conflict (each points /usr/bin/python at a different version), so install only the one you need:
apt install python-dev-is-python2
apt install python-dev-is-python3
#sanity-check the installation (pydoc with no arguments prints its usage help)
pydoc
#check version
pip --version
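#You can also confirm the interpreter versions directly (output varies by Ubuntu release):
python2 --version
python3 --version
pip3 --version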
#######################################################
# install git and clone repo of flowanalyzer #
#######################################################
sudo apt-get install git
#Clone the Git repository:
git clone https://gitlab.com/thart/flowanalyzer.git
#The download should only take a moment. Move into the repo directory:
cd flowanalyzer
#The ubuntu_install_elk5.sh script handles almost everything, just be sure to run it with sudo privileges:
sudo sh ./Install/ubuntu_install_elk5.sh
#Build Elasticsearch Indexes
#Index templates need to be created in Elasticsearch, so when data is collected the fields will be assigned the right data types and proper indexing settings.
#Netflow Index
#The build_netflow_index_elk5.sh script creates the default index for storing Netflow and IPFIX flow data in Elasticsearch:
sh ./Install/build_netflow_index_elk5.sh
#This should output the following message, indicating the template has been created:
{
  "acknowledged" : true
}
#sFlow Index
#Run the sFlow index script:
sh ./Install/build_sflow_index_elk5.sh
#This should output the following message, indicating the template has been created:
{
  "acknowledged" : true
}
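#To double-check that both templates were registered, list them via the Elasticsearch API (assumes Elasticsearch is listening on localhost:9200; exact template names depend on the scripts):
curl -s 'http://localhost:9200/_cat/templates?v'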
#These are examples of commands you may need if you're running a firewall on the Ubuntu Server installation:
#ufw allow from xxx.xxx.xxx.xxx/xx to any port 80 proto tcp comment "Kibana interface"
#ufw allow from xxx.xxx.xxx.xxx/xx to any port 9200 proto tcp comment "Elasticsearch CLI"
#ufw allow from xxx.xxx.xxx.xxx/xx to any port 2055,9995,4739,6343 proto udp comment "Flow data in"
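#After adding rules, confirm they're active:
sudo ufw status verbose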
#It’s important to reboot so that we’re sure the services were registered and start correctly:
sudo reboot
#Once the Ubuntu instance comes back up verify that the services have started:
systemctl status elasticsearch
systemctl status kibana
systemctl status netflow_v5
systemctl status netflow_v9
systemctl status ipfix
systemctl status sflow
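#A convenience sketch to check all six services in one loop ("active" means the unit is running):
for svc in elasticsearch kibana netflow_v5 netflow_v9 ipfix sflow; do
  printf '%s: ' "$svc"; systemctl is-active "$svc"
done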
#Disable Unused Collector Services
#sudo systemctl stop sflow
#sudo systemctl disable sflow
#sudo systemctl stop ipfix
#sudo systemctl disable ipfix
#Configure Devices
#Example MikroTik RouterOS configuration (replace your_host_ip with the collector's IP address):
#NetFlow version 5
/ip traffic-flow set enabled=yes interfaces=ether1
/ip traffic-flow target add dst-address=your_host_ip port=2055 version=5
#NetFlow version 9
/ip traffic-flow set enabled=yes interfaces=ether1
/ip traffic-flow target add dst-address=your_host_ip port=9995 version=9
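#You can verify the export settings on the router (RouterOS syntax):
/ip traffic-flow print
/ip traffic-flow target print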
#Service Protocol Port Information
Netflow v5 UDP 2055 Basic flow monitoring
Netflow v9 UDP 9995 Intermediate flow monitoring
IPFIX UDP 4739 Advanced flow monitoring
sFlow UDP 6343 Advanced flow and performance monitoring
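#To confirm the collectors are actually listening on these UDP ports (ss is part of iproute2 on Ubuntu):
sudo ss -lnu | grep -E ':(2055|9995|4739|6343)'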
Access Kibana
#There are a couple of ways to access Kibana, depending on whether you're using the reverse proxy for authentication.
#No Reverse Proxy (default):
Browse to Kibana at http://your_server_ip:5601
#Using a Reverse Proxy (optional, only if configured):
Browse to Kibana at http://your_server_ip
#Log in with the Squid credentials you created during the Squid configuration.
Configure Index Patterns
#On first logging in to Kibana you'll notice an orange warning message that states:
#  "No default index pattern. You must select or create one to continue."
#The Netflow and sFlow index scripts already created the index templates, and your devices should already be sending flows (which triggers index creation), but we still need to point Kibana at the right indexes.
#Netflow Index Pattern
#Use the following steps to point Kibana to the Netflow index:
#Click Index Patterns
#Click the Add New button in the upper-left if you are not already at the Configure an index pattern page
#Under Index name or pattern enter flow*
#Click outside the entry field and it should automatically parse your input, revealing more information below
#Note: If you haven't already configured your devices to send flows to the collector, go back and do that first; the following steps won't work without incoming data.
#Leave the automatically selected Time field under Time-field name.
#Click the Create button.
#You will be taken to the flow* index pattern.
#sFlow Index Pattern
#Use the following steps to point Kibana to the sFlow index:
#Click Index Patterns
#Click the Add New button in the upper-left if you are not already at the Configure an index pattern page
#Under Index name or pattern enter sflow*
#Click outside the entry field and it should automatically parse your input, revealing more information below
#Note: If you haven't already configured your devices to send flows to the collector, go back and do that first; the following steps won't work without incoming data.
#Leave the automatically selected Time field under Time-field name.
#Click the Create button.
#You will be taken to the sflow* index pattern.
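#If Kibana can't find an index, verify that flow documents are actually being indexed (assumes the default flow* and sflow* index names):
curl -s 'http://localhost:9200/_cat/indices/flow*,sflow*?v'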
Import Kibana Visualizations and Dashboards
#Download export.json from the Kibana directory of the repository.
#In Kibana perform the following steps:
#Click Management on the left navigation bar
#Click Saved Objects at the top of the window
#Click the Import button
#Browse to the downloaded JSON file
Update
#To get the latest updates do the following:
#Change to the flowanalyzer directory and fetch the latest stable code via Git from the Master branch:
cd ~/flowanalyzer
git pull
#Restart the collector services you are actively using on your network (all are listed here for documentation purposes):
systemctl restart netflow_v5
systemctl restart netflow_v9
systemctl restart ipfix
systemctl restart sflow
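#systemctl accepts multiple units, so the restarts can also be done in one command:
sudo systemctl restart netflow_v5 netflow_v9 ipfix sflow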
Elasticsearch Clustering
#Elasticsearch works best in a cluster, and as your Elasticsearch cluster grows you’ll get better performance and more storage. The default installation creates one instance of Elasticsearch, which is fine for testing or small organizations, but you’ll get the best performance from two or three (or more!) instances of Elasticsearch. As you sample more traffic from more devices you can grow the cluster over time.
#Fortunately, almost everything you need is included in the default installation script; you just need to tweak a few options and Elasticsearch does the rest automatically.
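#A minimal sketch of the clustering-related settings in /etc/elasticsearch/elasticsearch.yml for Elasticsearch 5.x (hypothetical node names and addresses; adjust for your environment):
#  cluster.name: flowanalyzer
#  node.name: es-node-1
#  network.host: 0.0.0.0
#  discovery.zen.ping.unicast.hosts: ["10.0.0.11", "10.0.0.12", "10.0.0.13"]
#  discovery.zen.minimum_master_nodes: 2
#With three master-eligible nodes, minimum_master_nodes should be 2 (a quorum) so the cluster avoids split-brain.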
##################################################
# Debugging, logs, and troubleshooting           #
##################################################
#Collector Logs
#Each collector's logs are handled by systemd and are easily viewable through the journalctl command:
journalctl -u netflow_v5
journalctl -u netflow_v9
journalctl -u ipfix
journalctl -u sflow
#Using the --follow option you can have journalctl scroll through the log for you as it's updated. Using the netflow_v5 service as an example:
journalctl -u netflow_v5 --follow
Elasticsearch Status
#If the Elasticsearch service isn’t running in the background it’s impossible to store flow data.
#Check the status of the Elasticsearch service and its logs to verify it’s online and healthy:
systemctl status elasticsearch
journalctl -u elasticsearch
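#You can also query Elasticsearch directly; a green or yellow status means the cluster is up (yellow just indicates unassigned replica shards, which is normal on a single node):
curl -s 'http://localhost:9200/_cluster/health?pretty'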
Kibana Status
#If the Kibana service isn't running in the background you won't be able to visualize or build dashboards from the data in Elasticsearch.
#Check the status of the Kibana service and its logs to verify it’s online and healthy:
systemctl status kibana
journalctl -u kibana
Squid Status
#Note: This is an optional feature. If you’re using Squid to put authentication in front of Kibana verify Squid is running. Check the status of the Squid service and its logs to verify it’s online and healthy:
systemctl status squid
journalctl -u squid
Launch Python Collector
#First, stop the collector service that's already running in the background (e.g. Netflow v9) so the port will be available:
sudo systemctl stop netflow_v9
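#Optionally confirm the port is now free before launching the collector manually (9995 is the Netflow v9 port from the table above):
sudo ss -lnu | grep 9995 || echo "port 9995 is free"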
#Change directory to where the Flow Analyzer is running, then the Python directory:
cd /your/directory/flowanalyzer/Python
#Run the collector Python file using the -l or --log option to set the logging level:
python netflow_v9.py -l info
#or...
python netflow_v9.py --log=debug