Home > Developer Guide > Features > Camera Scoring > Android Implementation
Android Camera Scoring Implementation
Purpose: Document the Android-specific implementation of camera-based arrow scoring
PR: #354
Key Technologies: TensorFlow Lite, CameraX, Jetpack Compose
Architecture Overview
┌─────────────────────────────────────────────────────────────────┐
│ CameraScoringFlow │
│ (Orchestrates permission → capture → detect → adjust → submit) │
└─────────────────────────────────────────────────────────────────┘
│
┌─────────────────────┼─────────────────────┐
▼ ▼ ▼
┌───────────────┐ ┌───────────────┐ ┌───────────────────┐
│ CameraX │ │ ArrowDetection│ │ ArrowAdjustment │
│ (TakePicture) │ │ Service │ │ Screen │
└───────────────┘ └───────────────┘ └───────────────────┘
│ │
▼ ▼
┌───────────────┐ ┌───────────────────┐
│ TFLite │ │ ArrowAdjustment │
│ Interpreter │ │ ViewModel │
└───────────────┘ └───────────────────┘
Dependencies
Add to gradle/libs.versions.toml:
[versions]
tflite = "2.14.0"
tflite-support = "0.4.4"
camerax = "1.3.0"
[libraries]
tensorflow-lite = { group = "org.tensorflow", name = "tensorflow-lite", version.ref = "tflite" }
tensorflow-lite-support = { group = "org.tensorflow", name = "tensorflow-lite-support", version.ref = "tflite-support" }
camerax-core = { group = "androidx.camera", name = "camera-core", version.ref = "camerax" }
camerax-camera2 = { group = "androidx.camera", name = "camera-camera2", version.ref = "camerax" }
camerax-lifecycle = { group = "androidx.camera", name = "camera-lifecycle", version.ref = "camerax" }
Add to app/build.gradle.kts:
dependencies {
implementation(libs.tensorflow.lite)
implementation(libs.tensorflow.lite.support)
implementation(libs.camerax.core)
implementation(libs.camerax.camera2)
implementation(libs.camerax.lifecycle)
}
android {
aaptOptions {
noCompress += "tflite"
}
}
ML Model Integration
Model File Location
Place the model at: app/src/main/assets/ml/arrow_detector.tflite
ArrowDetectionService
Location: domain/camera/ArrowDetectionService.kt
Key Configuration:
companion object {
private const val MODEL_FILE = "ml/arrow_detector.tflite"
private const val INPUT_SIZE = 640 // YOLOv8 standard
private const val NUM_CHANNELS = 3 // RGB
private const val NUM_BYTES_PER_CHANNEL = 4 // Float32
private const val CONFIDENCE_THRESHOLD = 0.35f // Lowered for better detection
private const val IOU_THRESHOLD = 0.5f // Lowered to reduce false duplicates
private const val NUM_DETECTIONS = 8400 // YOLOv8s detection grid
private const val LETTERBOX_COLOR = 114 // Gray padding for aspect ratio preservation
}
Initialization:
suspend fun initialize(): Result<Unit> = withContext(Dispatchers.IO) {
try {
val modelBuffer = FileUtil.loadMappedFile(context, MODEL_FILE)
val options = Interpreter.Options().apply {
setNumThreads(4) // Use 4 CPU threads
}
interpreter = Interpreter(modelBuffer, options)
Result.success(Unit)
} catch (e: Exception) {
Result.failure(ArrowDetectionException("Failed to initialize", e))
}
}
Image Preprocessing with Letterboxing
The YOLO model expects 640×640 square input. To maintain aspect ratio and avoid distortion, images are letterboxed (padded with gray) before inference:
/**
* Letterbox preprocessing preserves aspect ratio by:
* 1. Scaling the image to fit within 640x640 while maintaining aspect ratio
* 2. Padding the shorter dimension with gray (114, 114, 114)
* 3. Storing scale and offset for coordinate transformation
*/
data class LetterboxResult(
val bitmap: Bitmap,
val scale: Float,
val padX: Float,
val padY: Float
)
private fun letterboxImage(bitmap: Bitmap): LetterboxResult {
val scale = minOf(
INPUT_SIZE.toFloat() / bitmap.width,
INPUT_SIZE.toFloat() / bitmap.height
)
val scaledWidth = (bitmap.width * scale).toInt()
val scaledHeight = (bitmap.height * scale).toInt()
val padX = (INPUT_SIZE - scaledWidth) / 2f
val padY = (INPUT_SIZE - scaledHeight) / 2f
// Create gray canvas and draw scaled image centered
val letterboxed = Bitmap.createBitmap(INPUT_SIZE, INPUT_SIZE, Bitmap.Config.ARGB_8888)
val canvas = Canvas(letterboxed)
canvas.drawColor(Color.rgb(LETTERBOX_COLOR, LETTERBOX_COLOR, LETTERBOX_COLOR))
val scaledBitmap = Bitmap.createScaledBitmap(bitmap, scaledWidth, scaledHeight, true)
canvas.drawBitmap(scaledBitmap, padX, padY, null)
return LetterboxResult(letterboxed, scale, padX, padY)
}
private fun preprocessImage(bitmap: Bitmap): ByteBuffer {
val inputBuffer = ByteBuffer.allocateDirect(
1 * INPUT_SIZE * INPUT_SIZE * NUM_CHANNELS * NUM_BYTES_PER_CHANNEL
).apply {
order(ByteOrder.nativeOrder())
}
val pixels = IntArray(INPUT_SIZE * INPUT_SIZE)
bitmap.getPixels(pixels, 0, INPUT_SIZE, 0, 0, INPUT_SIZE, INPUT_SIZE)
for (pixel in pixels) {
val r = ((pixel shr 16) and 0xFF) / 255.0f
val g = ((pixel shr 8) and 0xFF) / 255.0f
val b = (pixel and 0xFF) / 255.0f
inputBuffer.putFloat(r)
inputBuffer.putFloat(g)
inputBuffer.putFloat(b)
}
inputBuffer.rewind()
return inputBuffer
}
Coordinate Transformation:
After inference, detected coordinates must be transformed back to original image space:
// Transform from letterboxed coordinates to original image coordinates
val originalX = (rawX - padX) / scale
val originalY = (rawY - padY) / scale
Post-Processing
YOLO Output Parsing:
private fun postProcessResults(outputMap: Map<Int, Any>, originalWidth: Int, originalHeight: Int): List<RawDetection> {
val output = outputMap[0] as Array<Array<FloatArray>>
val detections = mutableListOf<RawDetection>()
val scaleX = originalWidth.toFloat() / INPUT_SIZE
val scaleY = originalHeight.toFloat() / INPUT_SIZE
for (i in 0 until NUM_DETECTIONS) {
val confidence = output[0][4][i]
if (confidence >= CONFIDENCE_THRESHOLD) {
// YOLO outputs: center_x, center_y, width, height
val cx = output[0][0][i] * scaleX
val cy = output[0][1][i] * scaleY
val w = output[0][2][i] * scaleX
val h = output[0][3][i] * scaleY
detections.add(RawDetection(
box = RectF(cx - w/2, cy - h/2, cx + w/2, cy + h/2),
confidence = confidence,
centerX = cx,
centerY = cy
))
}
}
return applyNMS(detections)
}
Non-Maximum Suppression (NMS):
private fun applyNMS(detections: List<RawDetection>): List<RawDetection> {
val sorted = detections.sortedByDescending { it.confidence }.toMutableList()
val selected = mutableListOf<RawDetection>()
while (sorted.isNotEmpty()) {
val best = sorted.removeAt(0)
selected.add(best)
sorted.removeAll { detection ->
calculateIoU(best.box, detection.box) > IOU_THRESHOLD
}
}
return selected
}
Data Models
ArrowDetectionResult
Location: domain/camera/ArrowDetectionResult.kt
@Serializable
data class ArrowDetectionResult(
val version: String = "1.0",
val timestamp: String,
@SerialName("target_detection")
val targetDetection: TargetDetection,
val arrows: List<DetectedArrow>,
@SerialName("image_metadata")
val imageMetadata: ImageMetadata
)
DetectedArrow
@Serializable
data class DetectedArrow(
val id: Int,
@SerialName("normalized_x")
val normalizedX: Float, // -1 to +1
@SerialName("normalized_y")
val normalizedY: Float, // -1 to +1
@SerialName("pixel_x")
val pixelX: Float,
@SerialName("pixel_y")
val pixelY: Float,
@SerialName("distance_from_center")
val distanceFromCenter: Float,
@SerialName("clock_position_deg")
val clockPositionDeg: Float,
val confidence: Float,
@SerialName("bounding_box")
val boundingBox: BoundingBox
) {
fun calculateScore(): Int {
return when {
distanceFromCenter <= 0.05f -> 10 // X-ring
distanceFromCenter <= 0.10f -> 10
distanceFromCenter <= 0.20f -> 9
// ... etc
else -> 0 // Miss
}
}
fun isXRing(): Boolean = distanceFromCenter <= 0.05f
}
EditableArrow
Mutable version for the adjustment UI:
data class EditableArrow(
val id: Int,
var normalizedX: Float,
var normalizedY: Float,
var isManuallyPlaced: Boolean = false,
val originalConfidence: Float
) {
fun distanceFromCenter(): Float =
sqrt(normalizedX * normalizedX + normalizedY * normalizedY)
fun isOffTarget(): Boolean = distanceFromCenter() > 1.0f
companion object {
fun fromDetectedArrow(arrow: DetectedArrow): EditableArrow
fun createManualPlacement(id: Int, normalizedX: Float, normalizedY: Float): EditableArrow
}
}
Camera Scoring Flow
Location: ui/camera/CameraScoringFlow.kt
State Machine
sealed class CameraScoringState {
object RequestingPermission : CameraScoringState()
object Capturing : CameraScoringState()
object Processing : CameraScoringState()
data class Adjusting(val result: ArrowDetectionResult) : CameraScoringState()
data class Error(val message: String) : CameraScoringState()
}
Flow Composable
@Composable
fun CameraScoringFlow(
expectedArrowCount: Int,
onScoresConfirmed: (ConfirmedArrowPlacements) -> Unit,
onCancel: () -> Unit,
modifier: Modifier = Modifier
) {
val context = LocalContext.current
var state by remember { mutableStateOf<CameraScoringState>(CameraScoringState.RequestingPermission) }
val detectionService = remember { ArrowDetectionService(context) }
val adjustmentViewModel: ArrowAdjustmentViewModel = viewModel()
// Permission launcher
val permissionLauncher = rememberLauncherForActivityResult(
contract = ActivityResultContracts.RequestPermission()
) { granted ->
if (granted) {
state = CameraScoringState.Capturing
// Launch camera
} else {
state = CameraScoringState.Error("Camera permission required")
}
}
// Camera launcher
val cameraLauncher = rememberLauncherForActivityResult(
contract = ActivityResultContracts.TakePicture()
) { success ->
if (success) {
state = CameraScoringState.Processing
// Process image
}
}
// State-based rendering
when (val currentState = state) {
is CameraScoringState.Adjusting -> {
ArrowAdjustmentScreen(
viewModel = adjustmentViewModel,
onConfirm = onScoresConfirmed,
onCancel = onCancel
)
}
// ... other states
}
}
Adjustment UI
Location: ui/camera/ArrowAdjustmentScreen.kt
Screen Layout
@Composable
fun ArrowAdjustmentScreen(
viewModel: ArrowAdjustmentViewModel,
onConfirm: (ConfirmedArrowPlacements) -> Unit,
onCancel: () -> Unit
) {
val state by viewModel.state.collectAsState()
Column(modifier = Modifier.fillMaxSize().padding(16.dp)) {
// Score preview header
ScorePreviewHeader(state = state)
// Warning banners
if (state.showExtraArrowsWarning) {
ExtraArrowsWarning(extraCount = state.extraCount)
}
if (state.showMissingArrowsBanner) {
MissingArrowsBanner(
missingCount = state.missingCount,
isPlacementMode = state.isPlacementMode,
onEnterPlacementMode = { viewModel.togglePlacementMode() }
)
}
// Target face with arrows
TargetFaceWithArrows(
arrows = state.arrows,
isPlacementMode = state.isPlacementMode,
onArrowDrag = viewModel::updateArrowPosition,
onArrowDragEnd = viewModel::finalizeArrowPosition,
onTargetTap = { x, y -> viewModel.addManualArrow(x, y) }
)
// Action buttons
Row {
OutlinedButton(onClick = onCancel) { Text("Cancel") }
Button(onClick = { onConfirm(viewModel.confirmPlacements()) }) { Text("Confirm") }
}
}
}
Target Face Drawing
@Composable
private fun TargetFaceWithArrows(...) {
Canvas(modifier = Modifier.fillMaxSize()) {
val center = Offset(size.width / 2, size.height / 2)
val maxRadius = size.minDimension / 2
// Standard 10-ring target colors
val ringColors = listOf(
Color(0xFFFFFFFF), // Ring 1: White
Color(0xFFFFFFFF), // Ring 2: White
Color(0xFF000000), // Ring 3: Black
Color(0xFF000000), // Ring 4: Black
Color(0xFF00B4D8), // Ring 5: Blue
Color(0xFF00B4D8), // Ring 6: Blue
Color(0xFFFF0000), // Ring 7: Red
Color(0xFFFF0000), // Ring 8: Red
Color(0xFFFFD700), // Ring 9: Gold
Color(0xFFFFD700), // Ring 10: Gold
)
// Draw from outside in
for (i in 9 downTo 0) {
val ringRadius = maxRadius * ((i + 1) / 10f)
drawCircle(color = ringColors[9 - i], radius = ringRadius, center = center)
drawCircle(color = Color.Black, radius = ringRadius, center = center, style = Stroke(1.dp.toPx()))
}
// X-ring
drawCircle(color = Color(0xFFFFD700), radius = maxRadius * 0.05f, center = center)
}
}
Draggable Arrow Marker
@Composable
private fun DraggableArrowMarker(
arrow: EditableArrow,
confidence: Float,
offset: IntOffset,
targetRadius: Float,
onDrag: (dx: Float, dy: Float) -> Unit,
onDragEnd: () -> Unit
) {
val markerColor = when {
arrow.isManuallyPlaced -> Color.Green
confidence >= 0.7f -> Color.Red
confidence >= 0.5f -> Color(0xFFFFA000) // Orange
else -> Color(0xFFFF5722) // Deep orange
}
Surface(
modifier = Modifier
.offset { offset }
.size(24.dp)
.pointerInput(arrow.id) {
detectDragGestures(
onDragEnd = { onDragEnd() },
onDrag = { change, amount ->
change.consume()
onDrag(amount.x, amount.y)
}
)
},
shape = CircleShape,
color = markerColor,
shadowElevation = 4.dp
) {
// Inner white dot
Surface(Modifier.size(8.dp), shape = CircleShape, color = Color.White) {}
}
}
Critical: Drag Gesture Fix Pattern
Problem: The detectDragGestures callback captures the arrow position from render time (closure capture). When dragging, the callback uses stale coordinates, causing arrows to snap back to their original positions.
Solution: Use delta-based updates instead of absolute positions. Pass (arrowId, dx, dy, radius) to the ViewModel, which reads the current state and applies the delta:
// WRONG: Closure captures stale arrow.normalizedX/Y
onDrag = { _, dragAmount ->
val newX = arrow.normalizedX + dragAmount.x / radius // arrow is stale!
viewModel.updateArrowPosition(arrow.id, newX, newY)
}
// CORRECT: Pass delta to ViewModel, which uses current state
onDrag = { arrowId, dx, dy, radius ->
viewModel.updateArrowPositionDelta(arrowId, dx, dy, radius)
}
// ViewModel reads current state
fun updateArrowPositionDelta(arrowId: Int, dx: Float, dy: Float, radius: Float) {
_state.update { state ->
state.copy(arrows = state.arrows.map { arrow ->
if (arrow.id == arrowId) {
arrow.copy(
normalizedX = (arrow.normalizedX + dx / radius).coerceIn(-1.5f, 1.5f),
normalizedY = (arrow.normalizedY + dy / radius).coerceIn(-1.5f, 1.5f)
)
} else arrow
})
}
}
This pattern ensures dragging uses the actual current position, not a captured snapshot.
Adjustment ViewModel
Location: ui/camera/ArrowAdjustmentViewModel.kt
State
data class ArrowAdjustmentState(
val arrows: List<EditableArrow> = emptyList(),
val expectedArrowCount: Int = 0,
val isPlacementMode: Boolean = false,
val showExtraArrowsWarning: Boolean = false,
val showMissingArrowsBanner: Boolean = false
) {
val detectedCount: Int get() = arrows.size
val missingCount: Int get() = (expectedArrowCount - arrows.size).coerceAtLeast(0)
val extraCount: Int get() = (arrows.size - expectedArrowCount).coerceAtLeast(0)
fun calculateTotalScore(): Int = arrows.sumOf { it.calculateScore() }
fun countXRings(): Int = arrows.count { it.isXRing() }
}
Key Operations
class ArrowAdjustmentViewModel : ViewModel() {
private val _state = MutableStateFlow(ArrowAdjustmentState())
val state: StateFlow<ArrowAdjustmentState> = _state.asStateFlow()
fun initializeWithDetectionResult(result: ArrowDetectionResult, expectedArrowCount: Int) {
val editableArrows = result.arrows.map { EditableArrow.fromDetectedArrow(it) }
_state.update { it.copy(
arrows = editableArrows,
expectedArrowCount = expectedArrowCount,
showExtraArrowsWarning = editableArrows.size > expectedArrowCount,
showMissingArrowsBanner = editableArrows.size < expectedArrowCount,
isPlacementMode = editableArrows.size < expectedArrowCount
) }
}
fun updateArrowPosition(arrowId: Int, newX: Float, newY: Float) {
_state.update { state ->
state.copy(arrows = state.arrows.map { arrow ->
if (arrow.id == arrowId) {
arrow.copy(
normalizedX = newX.coerceIn(-1.5f, 1.5f),
normalizedY = newY.coerceIn(-1.5f, 1.5f)
)
} else arrow
})
}
}
fun finalizeArrowPosition(arrowId: Int) {
_state.update { state ->
val arrow = state.arrows.find { it.id == arrowId }
if (arrow != null && arrow.isOffTarget()) {
// Remove arrow dragged off target
val updatedArrows = state.arrows.filter { it.id != arrowId }
state.copy(
arrows = updatedArrows,
showMissingArrowsBanner = updatedArrows.size < state.expectedArrowCount
)
} else state
}
}
fun addManualArrow(normalizedX: Float, normalizedY: Float) {
if (!_state.value.isPlacementMode) return
val newArrow = EditableArrow.createManualPlacement(
id = nextArrowId++,
normalizedX = normalizedX,
normalizedY = normalizedY
)
_state.update { state ->
val updatedArrows = state.arrows + newArrow
state.copy(
arrows = updatedArrows,
isPlacementMode = updatedArrows.size < state.expectedArrowCount
)
}
}
fun confirmPlacements(): ConfirmedArrowPlacements {
return ConfirmedArrowPlacements(
arrows = _state.value.arrows.map { arrow ->
ConfirmedArrowPlacements.ArrowPlacement(
normalizedX = arrow.normalizedX,
normalizedY = arrow.normalizedY,
score = arrow.calculateScore(),
isX = arrow.isXRing(),
wasAdjusted = arrow.isManuallyPlaced || wasPositionChanged(arrow)
)
}
)
}
}
Integration
Manifest Permissions
<uses-permission android:name="android.permission.CAMERA" />
<uses-feature android:name="android.hardware.camera" android:required="false" />
FileProvider Configuration
Add to AndroidManifest.xml:
<provider
android:name="androidx.core.content.FileProvider"
android:authorities="${applicationId}.fileprovider"
android:exported="false"
android:grantUriPermissions="true">
<meta-data
android:name="android.support.FILE_PROVIDER_PATHS"
android:resource="@xml/file_paths" />
</provider>
Create res/xml/file_paths.xml:
<?xml version="1.0" encoding="utf-8"?>
<paths>
<cache-path name="camera_images" path="." />
</paths>
Integration with Scoring Screen
In ScoreInputSection.kt:
var showCameraScoring by remember { mutableStateOf(false) }
// Camera button
IconButton(onClick = { showCameraScoring = true }) {
Icon(Icons.Default.CameraAlt, contentDescription = "Camera scoring")
}
// Camera scoring flow
if (showCameraScoring) {
CameraScoringFlow(
expectedArrowCount = arrowsPerEnd,
onScoresConfirmed = { placements ->
placements.arrows.forEach { arrow ->
viewModel.addScore(arrow.score, arrow.isX)
}
showCameraScoring = false
},
onCancel = { showCameraScoring = false }
)
}
Manual Entry Fallback
When detection fails (model error, poor image quality, etc.), users can fall back to manual arrow placement:
// In ErrorScreen composable
@Composable
private fun ErrorScreen(
message: String,
onRetry: () -> Unit,
onManualEntry: () -> Unit, // NEW: Manual entry option
onCancel: () -> Unit
) {
Column(horizontalAlignment = Alignment.CenterHorizontally) {
// Error display...
Button(onClick = onRetry) {
Text("Try Again")
}
// Manual entry button - always visible on error
OutlinedButton(onClick = onManualEntry) {
Text("Enter Manually")
}
TextButton(onClick = onCancel) {
Text("Cancel")
}
}
}
The manual entry option initializes the adjustment UI with no detected arrows, allowing users to tap to place all arrows manually.
ProGuard / R8 Configuration
Add these rules to prevent R8 from stripping TensorFlow Lite classes:
# TensorFlow Lite
-keep class org.tensorflow.** { *; }
-keep interface org.tensorflow.** { *; }
-dontwarn org.tensorflow.**
# TFLite GPU delegate (if used)
-dontwarn org.tensorflow.lite.gpu.**
# CameraX
-keep class androidx.camera.** { *; }
Known Issues
16KB Page Size Incompatibility
TensorFlow Lite 2.14.0 libraries are not compatible with Android 15+ devices that require 16KB memory page alignment.
Symptoms:
- App crash on model initialization
- `UnsatisfiedLinkError` or similar native library errors
Upstream Tracking: TensorFlow GitHub issues
Future Mitigation:
// Check for 16KB page size devices
fun is16KBPageSizeDevice(): Boolean {
return Build.VERSION.SDK_INT >= 35 && /* page size check */
}
// Disable feature on affected devices
if (is16KBPageSizeDevice()) {
state = CameraScoringState.Error("Camera scoring not available on this device")
}
Testing
Unit Tests
@Test
fun `score calculation returns correct values`() {
val arrow = DetectedArrow(
id = 1,
normalizedX = 0f,
normalizedY = 0.03f, // Inside X-ring
// ...
distanceFromCenter = 0.03f
)
assertEquals(10, arrow.calculateScore())
assertTrue(arrow.isXRing())
}
@Test
fun `NMS removes overlapping detections`() {
val service = ArrowDetectionService(context)
val detections = listOf(
RawDetection(RectF(0f, 0f, 10f, 10f), 0.9f, 5f, 5f),
RawDetection(RectF(2f, 2f, 12f, 12f), 0.8f, 7f, 7f) // High IoU
)
val result = service.applyNMS(detections)
assertEquals(1, result.size)
assertEquals(0.9f, result[0].confidence)
}
UI Tests
@Test
fun `arrow marker drag updates position`() {
val viewModel = ArrowAdjustmentViewModel()
// Initialize with test data
composeTestRule.setContent {
ArrowAdjustmentScreen(viewModel, {}, {})
}
// Perform drag gesture
composeTestRule.onNodeWithTag("arrow_marker_1")
.performTouchInput {
down(center)
moveBy(Offset(50f, 50f))
up()
}
// Verify position updated
assertTrue(viewModel.state.value.arrows[0].normalizedX > 0)
}
Related Documentation
- Camera Scoring Overview
- iOS Camera Scoring Guide
- Visual Scoring Guide - Related target face rendering
Last Updated: 2025-12-04 PR: #354