CWE-770 in Fiber
How CWE-770 Manifests in Fiber
CWE-770 (Allocation of Resources Without Limits or Throttling) in Fiber applications typically appears as unbounded memory consumption or connection exhaustion. In Fiber, this vulnerability often manifests in several specific ways:
Unbounded request body parsing is a common pattern. When request bodies or multipart uploads are accepted without a size cap — whether through c.BodyParser() or multipart helpers such as c.FormFile() — large or malicious payloads can consume excessive memory:
func uploadHandler(c *fiber.Ctx) error {
	// VULNERABLE EXAMPLE (CWE-770): the multipart upload is accepted with
	// no size cap, so a client can post an arbitrarily large file and
	// exhaust server memory/disk.
	// No size limit - vulnerable to memory exhaustion
	file, err := c.FormFile("file")
	if err != nil {
		return err
	}
	// NOTE(review): `file` is never used below — as written this snippet
	// will not compile ("declared and not used"); real code would read or
	// save the file here, with a size check first.
	// Process file without size validation
	return c.JSON(fiber.Map{"status": "uploaded"})
}
Unbounded database query results can also trigger resource exhaustion. Consider this Fiber endpoint:
func getAllUsers(c *fiber.Ctx) error {
	// VULNERABLE EXAMPLE (CWE-770): no LIMIT and no pagination — the
	// entire users table is loaded into a slice and serialized in one
	// response, so memory use grows with table size.
	// No LIMIT clause - retrieves entire user table
	rows, err := db.Query("SELECT * FROM users")
	if err != nil {
		return err
	}
	defer rows.Close()
	var users []User
	for rows.Next() {
		var u User
		err := rows.Scan(&u.ID, &u.Name, &u.Email)
		if err != nil {
			return err
		}
		users = append(users, u)
	}
	// NOTE(review): rows.Err() is not checked after the loop, so an error
	// that ends iteration early is silently treated as end-of-data.
	return c.JSON(users) // Returns potentially massive dataset
}
Unbounded goroutine creation is another manifestation. Because a handler can freely start background goroutines, an attacker who hammers such an endpoint can spawn goroutines faster than they finish, with no upper bound:
func processHeavyTask(c *fiber.Ctx) error {
	// VULNERABLE EXAMPLE (CWE-770): one goroutine per request with no
	// pool, queue, or rate limit — total concurrency is attacker-controlled.
	// No worker pool or rate limiting
	go heavyProcessing(c.Query("data")) // Can spawn unlimited goroutines
	// NOTE(review): the string returned by c.Query may alias buffers that
	// Fiber recycles after the handler returns — confirm, and copy the
	// value (e.g. utils.CopyString) before handing it to a goroutine.
	return c.JSON(fiber.Map{"status": "processing"})
}
// heavyProcessing simulates a slow background job. It accepts no
// context.Context, so callers cannot cancel it — every invocation pins a
// goroutine for the full ten seconds.
func heavyProcessing(data string) {
	// Long-running operation without context cancellation
	time.Sleep(10 * time.Second)
}
Unbounded file operations represent another attack vector:
func downloadFile(c *fiber.Ctx) error {
	// VULNERABLE EXAMPLE: the filename is taken straight from the query
	// string. Beyond the unbounded-stream issue (CWE-770), this is also a
	// path traversal risk (CWE-22) — "../../etc/passwd" is accepted as-is.
	filename := c.Query("filename")
	file, err := os.Open(filename) // No path validation
	if err != nil {
		return err
	}
	defer file.Close()
	// Stream entire file without size limits
	return c.SendStream(file)
}
Fiber-Specific Detection
Detecting CWE-770 in Fiber applications requires both static analysis and runtime monitoring. Here's how to identify these vulnerabilities:
Runtime memory monitoring can be implemented using Fiber middleware:
// memoryMonitor returns middleware that logs any request whose handling
// grew the heap by more than 50 MiB, as a coarse CWE-770 tripwire.
func memoryMonitor() fiber.Handler {
	const threshold = 50 * 1024 * 1024 // 50MB per-request growth budget
	return func(c *fiber.Ctx) error {
		startMem := getCurrentMemoryUsage()
		err := c.Next()
		endMem := getCurrentMemoryUsage()
		// Use a signed delta: a GC cycle during the request can leave
		// endMem < startMem, and the original uint64 subtraction would
		// wrap to a huge value and raise a false alarm.
		memDelta := int64(endMem) - int64(startMem)
		if memDelta > threshold {
			log.Printf("High memory usage: %d bytes", memDelta)
			// Consider adding rate limiting or blocking
		}
		return err
	}
}
// getCurrentMemoryUsage reports the bytes of heap currently allocated to
// live objects, as seen by the Go runtime.
func getCurrentMemoryUsage() uint64 {
	var stats runtime.MemStats
	runtime.ReadMemStats(&stats)
	return stats.Alloc
}
Request size limiting is essential for preventing unbounded body parsing:
// setupFiberApp builds an app with middleware that rejects request bodies
// whose declared Content-Length exceeds 10 MiB.
func setupFiberApp() *fiber.App {
	app := fiber.New()
	// Set maximum request body size (10MB).
	// (The original had a dangling `c.Request().Header.ContentLength()`
	// expression statement whose result was discarded — removed.)
	app.Use(func(c *fiber.Ctx) error {
		// NOTE(review): Content-Length is client-supplied and absent for
		// chunked bodies; pair this check with fiber.Config{BodyLimit: ...}
		// for a hard cap enforced while reading.
		if c.Request().Header.ContentLength() > 10*1024*1024 {
			return c.Status(fiber.StatusRequestEntityTooLarge).SendString("Payload too large")
		}
		return c.Next()
	})
	return app
}
Database query limits should be enforced at the application layer:
// getUsersWithLimit returns up to `limit` users (default 100, max 1000).
func getUsersWithLimit(c *fiber.Ctx) error {
	limit := c.Query("limit", "100")
	limitInt, err := strconv.Atoi(limit)
	// Reject non-numeric, non-positive, and oversized limits. The original
	// check let 0 and negative values through to the database.
	if err != nil || limitInt < 1 || limitInt > 1000 {
		return c.Status(fiber.StatusBadRequest).SendString("Invalid limit")
	}
	// Use prepared statements with LIMIT
	rows, err := db.Query("SELECT * FROM users LIMIT ?", limitInt)
	if err != nil {
		return err
	}
	defer rows.Close()
	// Capacity is known up front; avoid repeated growth copies.
	users := make([]User, 0, limitInt)
	for rows.Next() {
		var u User
		if err := rows.Scan(&u.ID, &u.Name, &u.Email); err != nil {
			return err
		}
		users = append(users, u)
	}
	// Surface any error that terminated iteration early.
	if err := rows.Err(); err != nil {
		return err
	}
	return c.JSON(users)
}
Rate limiting middleware prevents unbounded request patterns:
// rateLimitMiddleware builds a Redis-backed limiter allowing at most 100
// requests per 60-second window.
func rateLimitMiddleware() fiber.Handler {
	// NOTE(review): this "redis" limiter API does not match the official
	// Fiber limiter middleware (github.com/gofiber/fiber/v2/middleware/limiter)
	// — confirm which package is intended. Whether Duration is in seconds
	// or a time.Duration depends on that package; verify before relying on
	// the 60 here meaning "60 seconds".
	limiter := redis.NewLimiter(&redis.LimiterConfig{
		Max: 100, // 100 requests
		Duration: 60, // per 60 seconds
		Redis: redis.NewRedis(redisConfig),
	})
	return limiter.NewMiddleware()
}
Fiber-Specific Remediation
Remediating CWE-770 in Fiber requires implementing limits and throttling mechanisms. Here are specific solutions:
Request body size limits using Fiber's built-in configuration:
// setupAppWithLimits constructs the app with every resource ceiling
// declared in one place: body size, connection count, and I/O deadlines.
func setupAppWithLimits() *fiber.App {
	cfg := fiber.Config{
		BodyLimit:    10 * 1024 * 1024, // reject request bodies over 10MB
		Concurrency:  1000,             // cap simultaneous connections
		ReadTimeout:  10 * time.Second, // drop slow-sending clients
		WriteTimeout: 30 * time.Second, // drop slow-reading clients
	}
	return fiber.New(cfg)
}
Worker pool for goroutine management:
// WorkerPool runs submitted jobs on a fixed number of goroutines, bounding
// concurrency regardless of request rate.
//
// Fixes over the original: `result := job()` did not compile (job is a
// func() with no return value), and the `results` channel was written but
// never drained, so the pool deadlocked after 100 completed jobs. Jobs are
// plain func() callbacks, so no results channel is needed.
type WorkerPool struct {
	workerCount int         // number of worker goroutines
	jobs        chan func() // buffered queue; SubmitJob blocks when full (backpressure)
}

// NewWorkerPool starts `size` workers that consume jobs until the jobs
// channel is closed.
func NewWorkerPool(size int) *WorkerPool {
	wp := &WorkerPool{
		workerCount: size,
		jobs:        make(chan func(), 100),
	}
	for i := 0; i < size; i++ {
		go func() {
			for job := range wp.jobs {
				job()
			}
		}()
	}
	return wp
}

// SubmitJob enqueues a job; it blocks if the queue is full, which is the
// throttling behavior we want for CWE-770 mitigation.
func (wp *WorkerPool) SubmitJob(job func()) {
	wp.jobs <- job
}
// Usage in Fiber handler
// processWithPool hands the heavy work to a bounded worker pool instead of
// spawning an unbounded goroutine per request.
func processWithPool(c *fiber.Ctx) error {
	wp := getWorkerPool()
	// Read the query value BEFORE submitting: the job may run after this
	// handler returns, and Fiber recycles *fiber.Ctx once the handler is
	// done, so touching `c` inside the job is a use-after-release race.
	// NOTE(review): the returned string may still alias Fiber's recycled
	// buffers — confirm, and deep-copy (e.g. utils.CopyString) if so.
	data := c.Query("data")
	wp.SubmitJob(func() {
		heavyProcessing(data)
	})
	return c.JSON(fiber.Map{"status": "processing"})
}
Streaming responses for large datasets:
// streamUsers writes up to 100 users as a JSON array, flushing each element
// so memory stays bounded regardless of row size.
func streamUsers(c *fiber.Ctx) error {
	// Set limit for streaming
	const limit = 100
	rows, err := db.Query("SELECT * FROM users LIMIT ?", limit)
	if err != nil {
		return err
	}
	defer rows.Close()
	c.Response().SetContentType(fiber.MIMEApplicationJSON)
	c.Response().Write([]byte("["))
	first := true
	for rows.Next() {
		if !first {
			c.Response().Write([]byte(","))
		}
		first = false
		var u User
		if err := rows.Scan(&u.ID, &u.Name, &u.Email); err != nil {
			return err
		}
		// The original discarded this error with `_`; a marshal failure
		// would have silently emitted corrupt JSON.
		userJSON, err := json.Marshal(u)
		if err != nil {
			return err
		}
		c.Response().Write(userJSON)
		// Flush to client
		c.Response().Flush()
	}
	// Surface any error that ended iteration early.
	if err := rows.Err(); err != nil {
		return err
	}
	c.Response().Write([]byte("]"))
	// 206 Partial Content was wrong here — it signals a byte-range
	// response, but the complete array was sent. Use 200.
	return c.SendStatus(fiber.StatusOK)
}
Context-based cancellation for long-running operations:
// processWithTimeout bounds the query to 5 seconds via context, so a slow
// database cannot pin the request (and its goroutine) indefinitely.
func processWithTimeout(c *fiber.Ctx) error {
	// Create context with timeout
	ctx, cancel := context.WithTimeout(c.Context(), 5*time.Second)
	defer cancel()
	// Use context in database operations
	rows, err := db.QueryContext(ctx, "SELECT * FROM users LIMIT 100")
	if err != nil {
		return err
	}
	defer rows.Close()
	var users []User
	for rows.Next() {
		var u User
		if err := rows.Scan(&u.ID, &u.Name, &u.Email); err != nil {
			return err
		}
		users = append(users, u)
	}
	// A context deadline hit mid-iteration surfaces here, not in Next();
	// the original skipped this check and would return a truncated result
	// as if it were complete.
	if err := rows.Err(); err != nil {
		return err
	}
	return c.JSON(users)
}