Need help with Hippo?
Click the “chat” button below for chat support from the developer who created it, or find similar developers for support.

About the developer

135 Stars 5 Forks MIT License 129 Commits 5 Opened issues


💨 A collection of well-crafted Go packages that help you build robust, reliable, maintainable microservices.

Services available


Need anything else?

Contributors list

# 37,947
102 commits

Hippo Logo


A Microservices Toolkit.

Hippo is a collection of well-crafted Go packages that help you build robust, reliable, maintainable microservices. It is not a full-fledged framework with a lot of magic, a predefined architecture, specific patterns, and heavy-handed opinions, so you will be the one behind the wheel.

It provides libraries to implement components for service discovery, async jobs, authentication, authorization, logging, caching, metrics, tracing, rate limiting, etc., which are essential requirements for running microservices in production.



go get -u
import (


HTTP Requests Component

httpClient := hippo.NewHTTPClient()

// Get Request response, err := httpClient.Get( "", map[string]string{"url_arg_key": "url_arg_value"}, map[string]string{"header_key": "header_value"}, )

// Delete Request response, err := httpClient.Delete( "", map[string]string{"url_arg_key": "url_arg_value"}, map[string]string{"header_key": "header_value"}, )

// Post Request response, err := httpClient.Post( "", {"RequestBodyKey":"RequestBodyValue"}, map[string]string{"url_arg_key": "url_arg_value"}, map[string]string{"header_key": "header_value"}, )

// Put Request response, err := httpClient.Put( "", {"RequestBodyKey":"RequestBodyValue"}, map[string]string{"url_arg_key": "url_arg_value"}, map[string]string{"header_key": "header_value"}, )

// ....

statusCode := httpClient.GetStatusCode(response) responseBody, err := httpClient.ToString(response)

Cache/Redis Component

driver := hippo.NewRedisDriver("localhost:6379", "password", 0)

// connect to redis server ok, err := driver.Connect() // ping check ok, err = driver.Ping()

// set an item ok, err = driver.Set("app_name", "Hippo", 0) // check if exists ok, err = driver.Exists("app_name") // get value value, err := driver.Get("app_name") // delete an item count, err := driver.Del("app_name")

// hash set ok, err = driver.HSet("configs", "app_name", "Hippo") // check if item on a hash ok, err = driver.HExists("configs", "app_name") // get item from a hash value, err = driver.HGet("configs", "app_name") // hash length count, err = driver.HLen("configs") // delete item from a hash count, err = driver.HDel("configs", "app_name") // clear the hash count, err = driver.HTruncate("configs")

// Pub/Sub driver.Publish("hippo", "Hello") driver.Subscribe("hippo", func(message hippo.Message) error { // message.Channel // message.Payload return nil })

Time Series/Graphite Component

import "time"

metric := hippo.NewMetric("hippo1.up", "23", time.Now().Unix()) // Type is hippo.Metric

metrics := hippo.NewMetrics("hippo2.up", "35", time.Now().Unix()) // type is []hippo.Metric metrics = append(metrics, hippo.NewMetric("hippo2.down", "40", time.Now().Unix())) metrics = append(metrics, hippo.NewMetric("hippo2.error", "70", time.Now().Unix()))

// NewGraphite(protocol string, host string, port int, prefix string) // protocol can be tcp, udp or nop // prefix is a metric prefix graphite := hippo.NewGraphite("tcp", "", 2003, "") error := graphite.Connect()

if error == nil{ // send one by one graphite.SendMetric(metric)

// bulk send


System Stats Component

// func NewSystemStats(enableCPU, enableMem, enableGC bool) *SystemStats {
stats := hippo.NewSystemStats(true, true, true)
stats.GetStats() // type map[string]uint64
// map[cpu.cgo_calls:0 cpu.goroutines:1 mem.alloc:0....]

Correlation ID Component

correlation := hippo.NewCorrelation()

Workers Pool Component

import "fmt"

tasks := []*hippo.Task{ hippo.NewTask(func() (string, error) { fmt.Println("Task #1") return "Result 1", nil }), hippo.NewTask(func() (string, error) { fmt.Println("Task #2") return "Result 2", nil }), hippo.NewTask(func() (string, error) { fmt.Println("Task #3") return "Result 3", nil }), }

// hippo.NewWorkersPool(tasks []*Task, concurrency int) *WorkersPool p := hippo.NewWorkersPool(tasks, 2) p.Run()

var numErrors int for _, task := range p.Tasks { if task.Err != nil { fmt.Println(task.Err) numErrors++ } else { fmt.Println(task.Result) } if numErrors >= 10 { fmt.Println("Too many errors.") break } }

Health Checker Component

import "fmt"

healthChecker := hippo.NewHealthChecker() healthChecker.AddCheck("ping_check", func() (bool, error){ return true, nil }) healthChecker.AddCheck("db_check", func() (bool, error){ return false, fmt.Errorf("Database Down") }) healthChecker.RunChecks()

fmt.Println(healthChecker.ChecksStatus()) // Output -> DOWN fmt.Println(healthChecker.ChecksReport()) // Output -> [{"id":"ping_check","status":"UP","error":"","result":true},{"id":"db_check","status":"DOWN","error":"Database Down","result":false}]

import "fmt"

healthChecker := hippo.NewHealthChecker()

healthChecker.AddCheck("url_check", func() (bool, error){ return hippo.HTTPCheck("httpbin_service", "", map[string]string{}, map[string]string{}) }) healthChecker.AddCheck("redis_check", func() (bool, error){ return hippo.RedisCheck("redis_service", "localhost:6379", "", 0) }) healthChecker.RunChecks()

fmt.Println(healthChecker.ChecksStatus()) // Outputs -> DOWN fmt.Println(healthChecker.ChecksReport()) // Outputs -> [{"id":"url_check","status":"DOWN","error":"Service httpbin_service is unavailable","result":false},{"id":"redis_check","status":"DOWN","error":"Error while connecting redis_service: dial tcp [::1]:6379: connect: connection refused","result":false}]

API Rate Limiting

import "time"

// Create a limiter with a specific identifier(IP address or access token or username....etc) // NewCallerLimiter(identifier string, eventsRate rate.Limit, tokenBurst int) *rate.Limiter limiter := hippo.NewCallerLimiter("", 100, 1) if limiter.Allow() == false { // Don't allow access } else { // Allow Access }

// auto clean old clients (should run as background process) // CleanupCallers(cleanAfter time.Duration) go func(){ for { time.Sleep(60 * time.Second) hippo.CleanupCallers(60) } }()

Logger Component

logger, _ := hippo.NewLogger("debug", "json", []string{"stdout", "/var/log/error.log"})

logger.Info("Hello World!") logger.Debug("Hello World!") logger.Warn("Hello World!") logger.Error("Hello World!")

defer logger.Sync()

// check if path exists exists := hippo.PathExists("/var/log")

// check if file exists exists := hippo.FileExists("/var/log/error.log")

// check if dir exists exists := hippo.DirExists("/var/log")

// ensure that dir exists exists, err := hippo.EnsureDir("/var/log", 755)

Latency Tracker Component

httpClient := hippo.NewHTTPClient()

latency := hippo.NewLatencyTracker() latency.NewAction("")

// First HTTP Call start := time.Now() httpClient.Get( "", map[string]string{}, map[string]string{}, ) latency.SetPoint("", start, time.Now())

// Another HTTP Call latency.SetStart("", time.Now()) httpClient.Get( "", map[string]string{}, map[string]string{}, ) latency.SetEnd("", time.Now())

// Now it will calculate the average fmt.Println(latency.GetLatency("")) // Output 486.217112ms


For transparency into our release cycle and in striving to maintain backward compatibility, Hippo is maintained under the Semantic Versioning guidelines, and the release process is predictable and business-friendly.

See the Releases section of our GitHub project for changelogs for each release version of Hippo. It contains summaries of the most noteworthy changes made in each release.

Bug tracker

If you have any suggestions, bug reports, or annoyances please report them to our issue tracker at

Security Issues

If you discover a security vulnerability within Hippo, please send an email to [email protected]


We are an open source, community-driven project, so please feel free to join us. See the contributing guidelines for more details.


© 2019, Clivern. Released under MIT License.

Hippo is authored and maintained by @Clivern.

We use cookies. If you continue to browse the site, you agree to the use of cookies. For more information on our use of cookies please see our Privacy Policy.