ElasticSearch Filebeat custom index

Custom Template and Index pattern setup.

    setup.ilm.enabled: false               #Disable ILM
    setup.template.name: "k8s-dev"         #Create custom template
    setup.template.pattern: "k8s-dev-*"    #Create custom template pattern
    setup.template.settings:
      index.number_of_shards: 1      #Set number_of_shards to 1 ONLY if you have a single-node ES
      index.number_of_replicas: 0    #Set number_of_replicas to 0 ONLY if you have a single-node ES
    output.elasticsearch:
       hosts: ['192.168.1.142:9200']
       index: "k8s-dev-%{+yyyy.MM.dd}" #Set k8s-dev-2020.01.01 as index name

filebeat-kubernetes.yaml

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: kube-system
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.inputs:
    - type: container
      paths:
        - /var/log/containers/*.log
      processors:
        - add_kubernetes_metadata:
            host: ${NODE_NAME}
            matchers:
            - logs_path:
                logs_path: "/var/log/containers/"

    # To enable hints based autodiscover, remove `filebeat.inputs` configuration and uncomment this:
    #filebeat.autodiscover:
    #  providers:
    #    - type: kubernetes
    #      node: ${NODE_NAME}
    #      hints.enabled: true
    #      hints.default_config:
    #        type: container
    #        paths:
    #          - /var/log/containers/*${data.kubernetes.container.id}.log

    processors:
      - add_cloud_metadata:
      - add_host_metadata:

    cloud.id: ${ELASTIC_CLOUD_ID}
    cloud.auth: ${ELASTIC_CLOUD_AUTH}

    setup.ilm.enabled: false
    setup.template.name: "k8s-dev"
    setup.template.pattern: "k8s-dev-*"
    setup.template.settings:
      index.number_of_shards: 1
      index.number_of_replicas: 0

    output.elasticsearch:
       hosts: ['192.168.1.142:9200']
       index: "k8s-dev-%{+yyyy.MM.dd}"

---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: kube-system
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - name: filebeat
        image: docker.elastic.co/beats/filebeat:7.6.2
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        env:
        - name: ELASTICSEARCH_HOST
          value: "192.168.1.142"
        - name: ELASTICSEARCH_PORT
          value: "9200"
        - name: ELASTIC_CLOUD_ID
          value:
        - name: ELASTIC_CLOUD_AUTH
          value:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        securityContext:
          runAsUser: 0
          # If using Red Hat OpenShift uncomment this:
          #privileged: true
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: data
          mountPath: /usr/share/filebeat/data
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
        - name: varlog
          mountPath: /var/log
          readOnly: true
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: varlog
        hostPath:
          path: /var/log
      # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
      - name: data
        hostPath:
          path: /var/lib/filebeat-data
          type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  verbs:
  - get
  - watch
  - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: kube-system
  labels:
    k8s-app: filebeat
---
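
To deploy, apply the manifest and confirm a Filebeat pod is running on every node:

    kubectl apply -f filebeat-kubernetes.yaml
    kubectl -n kube-system get pods -l k8s-app=filebeat -o wide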

The new k8s-dev-* index will appear in Kibana:

Create a matching index pattern (k8s-dev-*) in Kibana:

Finally, the logs show up under Discover.

Mac cleanup

#user cache file
echo "cleaning user cache file from ~/Library/Caches"
rm -rf ~/Library/Caches/*
echo "done cleaning from ~/Library/Caches"
#user logs
echo "cleaning user log file from ~/Library/logs"
rm -rf ~/Library/logs/*
echo "done cleaning from ~/Library/logs"
#user preference logs (left commented out; wiping ~/Library/Preferences resets app settings)
echo "cleaning user preference logs"
#rm -rf ~/Library/Preferences/*
echo "done cleaning from ~/Library/Preferences"
#system caches
echo "cleaning system caches"
sudo rm -rf /Library/Caches/*
echo "done cleaning system cache"
#system logs
echo "cleaning system logs from /Library/logs"
sudo rm -rf /Library/logs/*
echo "done cleaning from /Library/logs"
echo "cleaning system logs from /var/log"
sudo rm -rf /var/log/*
echo "done cleaning from /var/log"
echo "cleaning from /private/var/folders"
sudo rm -rf /private/var/folders/*
echo "done cleaning from /private/var/folders"
#ios photo caches
echo "cleaning ios photo caches"
rm -rf ~/Pictures/iPhoto\ Library/iPod\ Photo\ Cache/*
echo "done cleaning from ~/Pictures/iPhoto Library/iPod Photo Cache"
#application caches
echo "cleaning application caches"
for x in "$HOME"/Library/Containers/*/
do
    echo "cleaning ${x}Data/Library/Caches/"
    rm -rf "${x}Data/Library/Caches/"*
    echo "done cleaning ${x}Data/Library/Caches"
done
echo "done cleaning for application caches"

Docker aliases

alias dm='docker-machine'
alias dmx='docker-machine ssh'
alias dk='docker'
alias dki='docker images'
alias dks='docker service'
alias dkrm='docker rm'
alias dkl='docker logs'
alias dklf='docker logs -f'
alias dkflush='docker rm `docker ps --no-trunc -aq`'
alias dkflush2='docker rmi $(docker images --filter "dangling=true" -q --no-trunc)'
alias dkt='docker stats --format "table {{.Name}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}"'
alias dkps="docker ps --format '{{.ID}} ~ {{.Names}} ~ {{.Status}} ~ {{.Image}}'"
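
To make these stick, append the definitions to your shell profile and reload it (a sketch; ~/.zshrc is an assumption, use ~/.bashrc for bash, and docker-aliases.sh is a hypothetical file holding the lines above):

    cat docker-aliases.sh >> ~/.zshrc
    source ~/.zshrc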

kubectl commands from Slack

How cool would it be to run kubectl commands from a Slack channel… 🙂
This is not fully developed yet, but it comes in handy for dev and staging environments.
Let’s begin.

Requirements:
1. Create a new Slack bot.
2. Create a Slack channel (not private) and get its channel ID from https://slack.com/api/channels.list?token=REPLACE_TOKEN&pretty=1 (see the curl sketch after this list).
3. Add the Slack bot to the channel you created.
4. Then use the file below to create the K8s Deployment.
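
A quick way to pull the channel ID out of that endpoint's JSON, assuming jq is installed and the channel is named kubebot-channel (a hypothetical name):

    curl -s "https://slack.com/api/channels.list?token=REPLACE_TOKEN&pretty=1" | jq -r '.channels[] | select(.name == "kubebot-channel") | .id'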

---
#Create service account
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kubebot-user
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubebot-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
#  ClusterRole reference. NOTE: cluster-admin is very powerful; scope it down for production
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubebot-user
  namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kubebot
  labels:
    component: kubebot
spec:
  replicas: 1
  selector:
    matchLabels:
      component: kubebot
  template:
    metadata:
      labels:
        component: kubebot
    spec:
      serviceAccountName: kubebot-user
      containers:
      - name: kubebot
        image: harbur/kubebot:0.1.0
        imagePullPolicy: Always
        env:
        # Create a secret with your slack bot token and reference it here
        - name: KUBEBOT_SLACK_TOKEN
          value: "TOKEN_THAT_WAS_CREATED"
        # Channel IDs the bot listens in; use a space as a separator.
        # You get these from https://slack.com/api/channels.list?token=REPLACE_TOKEN&pretty=1
        - name: KUBEBOT_SLACK_CHANNELS_IDS
          value: 'AABBCCDD'          
        # Specify slack admins that kubebot should listen to; use a space as a separator
        - name: KUBEBOT_SLACK_ADMINS_NICKNAMES
          value: 'jag test someone'
        # Specify valid kubectl commands that kubebot should support; use a space as a separator
        - name: KUBEBOT_SLACK_VALID_COMMANDS
          value: "get describe logs explain"

Let me know if this helps.

Trigger(s) associated with a view or a table in PostgreSQL

This will return all the details you want to know:

select * from information_schema.triggers;

Or, if you want to filter and sort the results for a specific table, you can try:

SELECT event_object_table
      ,trigger_name
      ,event_manipulation
      ,action_statement
      ,action_timing
FROM  information_schema.triggers
WHERE event_object_table = 'tableName'
ORDER BY event_object_table
     ,event_manipulation;

The following will return the names of tables that have a trigger:

select relname as table_with_trigger
from pg_class
where pg_class.oid in (
        select tgrelid
        from pg_trigger
        );
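
Any of these can also be run from the shell via psql (mydb and tableName are placeholders):

    psql -d mydb -c "SELECT trigger_name, event_manipulation, action_timing FROM information_schema.triggers WHERE event_object_table = 'tableName';"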

Kubernetes multiline logs for Elasticsearch (Kibana)

If you’re having issues with Kubernetes multiline logs, here is the solution for you.

The Kubernetes autodiscover provider watches for Kubernetes pods to start, update, and stop. These are the available fields on every event:

      • host
      • port
      • kubernetes.container.id
      • kubernetes.container.image
      • kubernetes.container.name
      • kubernetes.labels
      • kubernetes.namespace
      • kubernetes.node.name
      • kubernetes.pod.name

If the include_annotations config is added to the provider config, the annotations listed there are added to the event.

Using the YAML file below, you can ship all K8s logs to Elasticsearch (hosted or in-house).

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: kube-system
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.config:
      inputs:
        # Mounted `filebeat-inputs` configmap:
        path: ${path.config}/inputs.d/*.yml
        # Reload inputs configs as they change:
        reload.enabled: false
      modules:
        path: ${path.config}/modules.d/*.yml
        # Reload module configs as they change:
        reload.enabled: false

    # To enable hints based autodiscover, remove `filebeat.config.inputs` configuration and uncomment this:
    #  hints.enabled: true
    filebeat.autodiscover:
      providers:
        - type: kubernetes
          templates:
            - condition:
                or:
                  - equals:
                      kubernetes.namespace: default
              config:
                - type: docker
                  containers.ids:
                    - "${data.kubernetes.container.id}"
                  multiline.pattern: '^[[:space:]]'
                  multiline.negate: false
                  multiline.match: after

    processors:
      - add_cloud_metadata:

    cloud.id: ${ELASTIC_CLOUD_ID}
    cloud.auth: ${ELASTIC_CLOUD_AUTH}

    output.elasticsearch:
      # Use an https:// host here if it is hosted ES
      hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9243}']
      username: ${ELASTICSEARCH_USERNAME}
      password: ${ELASTICSEARCH_PASSWORD}

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-inputs
  namespace: kube-system
  labels:
    k8s-app: filebeat
data:
  kubernetes.yml: |-
    - type: docker
      containers.path: "/var/lib/docker/containers"
      symlinks: true
      containers.ids:
      - "*"
      multiline.pattern: '^DEBUG'
      multiline.negate: true
      multiline.match: 'after'
      processors:
        - add_kubernetes_metadata:
            in_cluster: true

---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: kube-system
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      containers:
      - name: filebeat
        image: docker.elastic.co/beats/filebeat:6.4.0
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        env:
        - name: ELASTICSEARCH_HOST
          value: "elasticsearch"    # Use https:// if it is hosted ES
        - name: ELASTICSEARCH_PORT
          value: "9243"
        - name: ELASTICSEARCH_USERNAME
          value: "elasticsearch"
        - name: ELASTICSEARCH_PASSWORD
          value: "changeme"
        - name: ELASTIC_CLOUD_ID
          value:
        - name: ELASTIC_CLOUD_AUTH
          value:
        securityContext:
          runAsUser: 0
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: inputs
          mountPath: /usr/share/filebeat/inputs.d
          readOnly: true
        - name: data
          mountPath: /usr/share/filebeat/data
        - name: varlogpods
          mountPath: /var/log/pods
          readOnly: true
        - name: varlogcontainers
          mountPath: /var/log/containers
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlogpods
        hostPath:
          path: /var/log/pods
      - name: varlogcontainers
        hostPath:
          path: /var/log/containers
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: inputs
        configMap:
          defaultMode: 0600
          name: filebeat-inputs
      # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
      - name: data
        hostPath:
          path: /var/lib/filebeat-data
          type: DirectoryOrCreate

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  verbs:
  - get
  - watch
  - list

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: kube-system
  labels:
    k8s-app: filebeat
---
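
Apply the whole manifest and verify the Filebeat DaemonSet pods come up (the filename is an assumption):

    kubectl apply -f filebeat-multiline.yaml
    kubectl -n kube-system get pods -l k8s-app=filebeat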

GitLab setup for Kubernetes

GitLab is the first single application built from the ground up for all stages of the DevOps lifecycle, enabling Product, Development, QA, Security, and Operations teams to work concurrently on the same project. GitLab lets teams collaborate in a single conversation instead of managing multiple threads across different tools, and it provides a single data store, one user interface, and one permission model across the DevOps lifecycle, significantly reducing cycle time and letting teams focus on building great software quickly.

Here is a quick tutorial on how to deploy GitLab in a Kubernetes environment.

Requirements:

1. A working Kubernetes cluster

2. Storage for the stateful components (this walkthrough uses NFS-backed PersistentVolumes; a StorageClass from your cloud provider works too)

Here is the Git repository I used to build GitLab:

#Clone the gitlab_k8s repo
git clone https://github.com/jaganthoutam/gitlab_k8s.git
cd gitlab_k8s

gitlab-pvc.yaml contains the PVs and PVCs for Postgres, GitLab, and Redis.

Here I am using my NFS server (nfs01.thoutam.loc)…

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-gitlab
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 20Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-gitlab
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteMany
  nfs:
    server: nfs01.thoutam.loc
    # Exported path of your NFS server
    path: "/mnt/gitlab"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-gitlab-post
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 20Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-gitlab-post
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteMany
  nfs:
    server: nfs01.thoutam.loc
    # Exported path of your NFS server
    path: "/mnt/gitlab-post"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-gitlab-redis
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 20Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-gitlab-redis
spec:
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteMany
  nfs:
    server: nfs01.thoutam.loc
    # Exported path of your NFS server
    path: "/mnt/gitlab-redis"
---
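
Apply the volumes first and check that each PersistentVolume binds to its claim:

    kubectl apply -f gitlab-pvc.yaml
    kubectl get pv,pvc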

You can use other storage classes based on your cloud provider.

gitlab-rc.yml contains the GitLab Deployment config:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: gitlab
spec:
  replicas: 1
  selector:
    matchLabels:
      name: gitlab
  template:
    metadata:
      name: gitlab
      labels:
        name: gitlab
    spec:
      containers:
      - name: gitlab
        image: jaganthoutam/gitlab:11.1.4
        env:
        - name: TZ
          value: Asia/Kolkata
        - name: GITLAB_TIMEZONE
          value: Kolkata

        - name: GITLAB_SECRETS_DB_KEY_BASE
          value: long-and-random-alpha-numeric-string  #CHANGE ME
        - name: GITLAB_SECRETS_SECRET_KEY_BASE
          value: long-and-random-alpha-numeric-string #CHANGE ME
        - name: GITLAB_SECRETS_OTP_KEY_BASE
          value: long-and-random-alpha-numeric-string #CHANGE ME


        - name: GITLAB_ROOT_PASSWORD
          value: password               #CHANGE ME
        - name: GITLAB_ROOT_EMAIL
          value: admin@example.com      #CHANGE ME

        - name: GITLAB_HOST
          value: gitlab.lb.thoutam.loc  #CHANGE ME
        - name: GITLAB_PORT
          value: "80"
        - name: GITLAB_SSH_PORT
          value: "22"

        - name: GITLAB_NOTIFY_ON_BROKEN_BUILDS
          value: "true"
        - name: GITLAB_NOTIFY_PUSHER
          value: "false"

        - name: GITLAB_BACKUP_SCHEDULE
          value: daily
        - name: GITLAB_BACKUP_TIME
          value: "01:00"

        - name: DB_TYPE
          value: postgres
        - name: DB_HOST
          value: postgresql
        - name: DB_PORT
          value: "5432"
        - name: DB_USER
          value: gitlab
        - name: DB_PASS
          value: passw0rd
        - name: DB_NAME
          value: gitlab_production

        - name: REDIS_HOST
          value: redis
        - name: REDIS_PORT
          value: "6379"

        - name: SMTP_ENABLED
          value: "false"
        - name: SMTP_DOMAIN
          value: www.example.com
        - name: SMTP_HOST
          value: smtp.gmail.com
        - name: SMTP_PORT
          value: "587"
        - name: SMTP_USER
          value: user@example.com
        - name: SMTP_PASS
          value: password
        - name: SMTP_STARTTLS
          value: "true"
        - name: SMTP_AUTHENTICATION
          value: login

        - name: IMAP_ENABLED
          value: "false"
        - name: IMAP_HOST
          value: imap.gmail.com
        - name: IMAP_PORT
          value: "993"
        - name: IMAP_USER
          value: user@example.com
        - name: IMAP_PASS
          value: password
        - name: IMAP_SSL
          value: "true"
        - name: IMAP_STARTTLS
          value: "false"
        ports:
        - name: http
          containerPort: 80
        - name: ssh
          containerPort: 22
        volumeMounts:
        - mountPath: /home/git/data
          name: data
        livenessProbe:
          httpGet:
            path: /
            port: 80
          initialDelaySeconds: 180
          timeoutSeconds: 5
        readinessProbe:
          httpGet:
            path: /
            port: 80
          initialDelaySeconds: 5
          timeoutSeconds: 1
      volumes:
      - name: data
        persistentVolumeClaim:
#NFS PVC name identifier
          claimName: nfs-gitlab
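
The three GITLAB_SECRETS_* values should each be a long, random alpha-numeric string. One way to generate them with standard tools (a sketch; any long random string works):

    # prints a 64-character random alpha-numeric string; run once per secret
    head -c 1024 /dev/urandom | LC_ALL=C tr -dc 'A-Za-z0-9' | head -c 64; echo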

gitlab-svc.yml contains the GitLab Service. It exposes the default ports 80 (HTTP) and 22 (SSH).

apiVersion: v1
kind: Service
metadata:
  name: gitlab
  labels:
    name: gitlab
spec:
  type: NodePort
  ports:
    - name: http
      port: 80
      targetPort: http
    - name: ssh
      port: 22
      targetPort: ssh
  selector:
    name: gitlab

postgresql-rc.yml contains the Postgres ReplicationController config:

apiVersion: v1
kind: ReplicationController
metadata:
  name: postgresql
spec:
  replicas: 1
  selector:
    name: postgresql
  template:
    metadata:
      name: postgresql
      labels:
        name: postgresql
    spec:
      containers:
      - name: postgresql
        image: jaganthoutam/postgresql:10
        env:
        - name: DB_USER
          value: gitlab
        - name: DB_PASS
          value: passw0rd
        - name: DB_NAME
          value: gitlab_production
        - name: DB_EXTENSION
          value: pg_trgm
        ports:
        - name: postgres
          containerPort: 5432
        volumeMounts:
        - mountPath: /var/lib/postgresql
          name: data
        livenessProbe:
          exec:
            command:
            - pg_isready
            - -h
            - localhost
            - -U
            - postgres
          initialDelaySeconds: 30
          timeoutSeconds: 5
        readinessProbe:
          exec:
            command:
            - pg_isready
            - -h
            - localhost
            - -U
            - postgres
          initialDelaySeconds: 5
          timeoutSeconds: 1
      volumes:
      - name: data
        persistentVolumeClaim:
# NFS PVC identifier
          claimName: nfs-gitlab-post

postgresql-svc.yml contains the Postgres Service config:

apiVersion: v1
kind: Service
metadata:
  name: postgresql
  labels:
    name: postgresql
spec:
  ports:
    - name: postgres
      port: 5432
      targetPort: postgres
  selector:
    name: postgresql

redis-rc.yml contains the Redis ReplicationController config:

apiVersion: v1
kind: ReplicationController
metadata:
  name: redis
spec:
  replicas: 1
  selector:
    name: redis
  template:
    metadata:
      name: redis
      labels:
        name: redis
    spec:
      containers:
      - name: redis
        image: jaganthoutam/redis
        ports:
        - name: redis
          containerPort: 6379
        volumeMounts:
        - mountPath: /var/lib/redis
          name: data
        livenessProbe:
          exec:
            command:
            - redis-cli
            - ping
          initialDelaySeconds: 30
          timeoutSeconds: 5
        readinessProbe:
          exec:
            command:
            - redis-cli
            - ping
          initialDelaySeconds: 5
          timeoutSeconds: 1
      volumes:
      - name: data
        persistentVolumeClaim:
#NFS PVC identifier
          claimName: nfs-gitlab-redis

redis-svc.yml contains the Redis Service config:

apiVersion: v1
kind: Service
metadata:
  name: redis
  labels:
    name: redis
spec:
  ports:
    - name: redis
      port: 6379
      targetPort: redis
  selector:
    name: redis

Change the configuration according to your needs and apply using kubectl.

kubectl apply -f .


#and check whether your pods are running:

root@k8smaster-01:~# kubectl get po
NAME                        READY     STATUS    RESTARTS   AGE
gitlab-589cb45ff4-hch2g     1/1       Running   1          1d
postgres-55f6bcbb99-4x48g   1/1       Running   3          1d
postgresql-v2svn            1/1       Running   4          1d
redis-7r486                 1/1       Running   2          1d

Let me know if this helps you.