Home > Developer Guide > Features > Camera Scoring > iOS Implementation
iOS Camera Scoring Implementation
Purpose: Document the iOS-specific implementation of camera-based arrow detection
PR: #354
Key Technologies: CoreML, Vision, UIImagePickerController, SwiftUI
Architecture Overview
┌─────────────────────────────────────────────────────────────────┐
│ CameraScoringFlow │
│ (Orchestrates image source → detect → adjust → submit) │
└─────────────────────────────────────────────────────────────────┘
│
┌─────────────────────┼─────────────────────┐
▼ ▼ ▼
┌───────────────┐ ┌───────────────┐ ┌───────────────────────┐
│ ImagePicker │ │ ArrowDetection│ │ CameraScoringAdjust- │
│ (Camera/Photos│ │ Service │ │ mentView │
└───────────────┘ └───────────────┘ └───────────────────────┘
│ │
▼ ▼
┌───────────────┐ ┌───────────────────────┐
│ VNCoreML │ │ ArrowAdjustmentView │
│ Request │ │ Model │
└───────────────┘ └───────────────────────┘
ML Model Integration
Model File
The CoreML model is stored as an .mlpackage in the Xcode project; at build time Xcode compiles it into a .mlmodelc bundle, which is the resource the service actually loads at runtime:
ArcheryApprentice/ArrowDetector.mlpackage/
├── Data/com.apple.CoreML/
│ ├── model.mlmodel
│ └── weights/weight.bin
ArrowDetectionService
Location: Services/Camera/ArrowDetectionService.swift
Key Configuration:
/// Wraps the CoreML arrow-detection model behind an ObservableObject so
/// SwiftUI views can react to load/processing state.
class ArrowDetectionService: ObservableObject {
    // Compiled Vision model; nil until initialize() succeeds.
    private var model: VNCoreMLModel?
    // Detections below this confidence are discarded (see processDetections).
    private let confidenceThreshold: Float = 0.40
    // Boxes overlapping more than this IoU are suppressed during NMS.
    private let iouThreshold: Float = 0.7
    @Published var isInitialized = false
    @Published var isProcessing = false
}
Initialization:
/// Loads the compiled CoreML model from the app bundle. Idempotent: returns
/// immediately if already initialized.
/// - Throws: `ArrowDetectionError.modelNotFound` if the bundle resource is
///   missing, `.modelLoadFailed` if CoreML cannot load it.
func initialize() async throws {
    guard !isInitialized else { return }
    // The project stores an .mlpackage, but Xcode compiles it to .mlmodelc
    // at build time — that is the extension present in the app bundle.
    guard let modelURL = Bundle.main.url(forResource: "ArrowDetector", withExtension: "mlmodelc") else {
        throw ArrowDetectionError.modelNotFound
    }
    do {
        let mlModel = try MLModel(contentsOf: modelURL)
        model = try VNCoreMLModel(for: mlModel)
        isInitialized = true
    } catch {
        throw ArrowDetectionError.modelLoadFailed(error)
    }
}
Image Processing with Vision
/// Runs arrow detection on `image`, lazily initializing the model on first use.
/// - Parameter image: The target photo to analyze.
/// - Returns: The detection result (target info + per-arrow detections).
/// - Throws: `ArrowDetectionError.modelNotFound` if the model is unavailable
///   after initialization, or any error thrown by `initialize()`/detection.
func detectArrows(in image: UIImage) async throws -> ArrowDetectionResult {
    // Lazily load the model; initialize() is a no-op when already initialized.
    // This replaces the previous guard/else structure that duplicated the
    // performDetection call in both branches.
    if !isInitialized || model == nil {
        try await initialize()
    }
    guard let model = model else {
        throw ArrowDetectionError.modelNotFound
    }
    return try await performDetection(image: image, model: model)
}
/// Performs the Vision request against the CoreML model and maps the raw
/// observations into an `ArrowDetectionResult`.
///
/// The checked continuation must be resumed exactly once on every path —
/// the previous implementation leaked it (hanging the awaiting task) when
/// the results cast failed or when `handler.perform` threw under `try?`,
/// and force-unwrapped the built result.
private func performDetection(image: UIImage, model: VNCoreMLModel) async throws -> ArrowDetectionResult {
    isProcessing = true
    defer { isProcessing = false }
    guard let cgImage = image.cgImage else {
        throw ArrowDetectionError.invalidImage
    }
    return try await withCheckedThrowingContinuation { continuation in
        let request = VNCoreMLRequest(model: model) { [weak self] request, error in
            if let error = error {
                continuation.resume(throwing: ArrowDetectionError.detectionFailed(error))
                return
            }
            guard let self = self else {
                // Service deallocated mid-request; surface a failure rather
                // than leaving the caller suspended forever.
                continuation.resume(throwing: ArrowDetectionError.imageProcessingFailed)
                return
            }
            // No recognized objects is a valid (empty) result, not an error.
            let results = request.results as? [VNRecognizedObjectObservation] ?? []
            let detections = self.processDetections(results, imageWidth: cgImage.width, imageHeight: cgImage.height)
            let result = self.buildDetectionResult(detections: detections, imageWidth: cgImage.width, imageHeight: cgImage.height)
            continuation.resume(returning: result)
        }
        request.imageCropAndScaleOption = .scaleFill
        let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])
        do {
            try handler.perform([request])
        } catch {
            // Previously `try?` swallowed this error and leaked the
            // continuation. NOTE(review): confirm Vision does not also invoke
            // the request completion handler when perform throws, to rule out
            // a double resume.
            continuation.resume(throwing: ArrowDetectionError.detectionFailed(error))
        }
    }
}
Vision Coordinate Conversion
Vision framework uses normalized coordinates (0-1) with origin at bottom-left. Convert to pixel coordinates with top-left origin:
/// Converts Vision observations to pixel-space detections and applies NMS.
/// Vision bounding boxes are normalized (0–1) with a BOTTOM-LEFT origin;
/// pixel space uses a top-left origin, hence the Y-axis flip below.
private func processDetections(_ observations: [VNRecognizedObjectObservation], imageWidth: Int, imageHeight: Int) -> [RawDetection] {
    var detections: [RawDetection] = []
    for observation in observations {
        // Drop low-confidence detections early.
        guard observation.confidence >= confidenceThreshold else { continue }
        let box = observation.boundingBox
        // Convert Vision coordinates to pixel coordinates
        let x = box.origin.x * CGFloat(imageWidth)
        let y = (1.0 - box.origin.y - box.height) * CGFloat(imageHeight) // Flip Y-axis
        let width = box.width * CGFloat(imageWidth)
        let height = box.height * CGFloat(imageHeight)
        // Arrow impact point is taken as the box center.
        let centerX = x + width / 2
        let centerY = y + height / 2
        detections.append(RawDetection(
            box: CGRect(x: x, y: y, width: width, height: height),
            confidence: observation.confidence,
            centerX: Float(centerX),
            centerY: Float(centerY)
        ))
    }
    // Collapse multiple overlapping boxes for the same arrow.
    return applyNMS(detections)
}
Non-Maximum Suppression
/// Greedy non-maximum suppression: repeatedly keep the highest-confidence
/// remaining detection and discard every other box that overlaps it by more
/// than `iouThreshold`.
private func applyNMS(_ detections: [RawDetection]) -> [RawDetection] {
    var keep: [RawDetection] = []
    var candidates = detections.sorted { $0.confidence > $1.confidence }
    while let winner = candidates.first {
        keep.append(winner)
        // Retain only candidates that do NOT significantly overlap the winner.
        candidates = candidates.dropFirst().filter {
            calculateIoU(winner.box, $0.box) <= iouThreshold
        }
    }
    return keep
}
/// Intersection-over-Union of two boxes; returns 0 for disjoint or
/// degenerate (zero-area) rectangles.
private func calculateIoU(_ box1: CGRect, _ box2: CGRect) -> Float {
    let intersection = box1.intersection(box2)
    // CGRect.intersection returns .null when the rects do not overlap.
    guard !intersection.isNull else { return 0 }
    let intersectionArea = intersection.width * intersection.height
    let unionArea = box1.width * box1.height + box2.width * box2.height - intersectionArea
    // Guard against division by zero for zero-area boxes.
    return unionArea > 0 ? Float(intersectionArea / unionArea) : 0
}
Data Models
ArrowDetectionResult
/// Top-level output of a detection pass: schema version, target info,
/// per-arrow detections, and metadata about the processed image.
struct ArrowDetectionResult {
    // Schema version for forward compatibility of persisted results.
    let version: String
    let timestamp: Date
    let targetDetection: TargetDetection
    let arrows: [DetectedArrow]
    let imageMetadata: ImageMetadata
    init(
        version: String = "1.0",
        timestamp: Date = Date(),
        targetDetection: TargetDetection,
        arrows: [DetectedArrow],
        imageMetadata: ImageMetadata
    ) {
        // ... (straightforward stored-property assignments omitted for brevity)
    }
}
DetectedArrow
/// Immutable model for a single detected arrow, in both normalized
/// target-face coordinates and pixel coordinates.
struct DetectedArrow: Identifiable {
    let id: Int
    let normalizedX: Float // -1 to +1
    let normalizedY: Float // -1 to +1
    let pixelX: Float
    let pixelY: Float
    // Normalized radial distance: 0 = center, 1.0 = outermost ring edge.
    let distanceFromCenter: Float
    let clockPositionDeg: Float
    let confidence: Float
    let boundingBox: BoundingBox
    /// Maps normalized distance to a 10-ring score. Rings are 0.10 wide;
    /// anything beyond 1.0 is a miss. The first two cases both return 10
    /// because the X-ring is a tiebreaker, not a higher score.
    func calculateScore() -> Int {
        switch distanceFromCenter {
        case ...0.05: return 10 // X-ring
        case ...0.10: return 10
        case ...0.20: return 9
        case ...0.30: return 8
        case ...0.40: return 7
        case ...0.50: return 6
        case ...0.60: return 5
        case ...0.70: return 4
        case ...0.80: return 3
        case ...0.90: return 2
        case ...1.00: return 1
        default: return 0 // Miss
        }
    }
    /// True when the arrow lies inside the inner X-ring (used for tiebreaks).
    var isXRing: Bool {
        return distanceFromCenter <= 0.05
    }
}
EditableArrow
Mutable version for the adjustment UI:
/// Mutable arrow model for the adjustment UI.
struct EditableArrow: Identifiable {
    // Defaulted so the memberwise initializer can be called without `id:` —
    // the factory methods below (and the unit tests) omit it, which would
    // not compile with a non-defaulted `let id: UUID`.
    var id = UUID()
    var normalizedX: Double // -1 to +1
    var normalizedY: Double // -1 to +1
    var isManuallyPlaced: Bool
    let originalConfidence: Float

    /// Euclidean distance from the target center in normalized units.
    func distanceFromCenter() -> Double {
        return sqrt(normalizedX * normalizedX + normalizedY * normalizedY)
    }

    /// True once the arrow has been dragged beyond the outermost ring.
    var isOffTarget: Bool {
        return distanceFromCenter() > 1.0
    }

    /// Ring score at the CURRENT (possibly edited) position. Mirrors
    /// DetectedArrow.calculateScore() and is required by
    /// ArrowAdjustmentViewModel.confirmPlacements(), which calls it on
    /// EditableArrow values.
    func calculateScore() -> Int {
        switch distanceFromCenter() {
        case ...0.10: return 10 // includes the X-ring (<= 0.05)
        case ...0.20: return 9
        case ...0.30: return 8
        case ...0.40: return 7
        case ...0.50: return 6
        case ...0.60: return 5
        case ...0.70: return 4
        case ...0.80: return 3
        case ...0.90: return 2
        case ...1.00: return 1
        default: return 0 // Miss
        }
    }

    /// X-ring check at the current position; mirrors DetectedArrow.isXRing.
    var isXRing: Bool {
        return distanceFromCenter() <= 0.05
    }

    /// Converts a detection into an editable arrow, preserving confidence.
    static func fromDetectedArrow(_ arrow: DetectedArrow) -> EditableArrow {
        return EditableArrow(
            normalizedX: Double(arrow.normalizedX),
            normalizedY: Double(arrow.normalizedY),
            isManuallyPlaced: false,
            originalConfidence: arrow.confidence
        )
    }

    /// Creates a user-placed arrow; confidence is 1.0 since the user chose
    /// the position deliberately.
    static func createManualPlacement(normalizedX: Double, normalizedY: Double) -> EditableArrow {
        return EditableArrow(
            normalizedX: normalizedX,
            normalizedY: normalizedY,
            isManuallyPlaced: true,
            originalConfidence: 1.0
        )
    }
}
Camera Scoring Flow
Location: Views/Camera/CameraScoringFlow.swift
State Machine
/// Phases of the camera scoring flow; drives which subview is rendered.
enum CameraScoringState {
    case requestingPermission
    case capturing
    case processing
    // Carries the detection result through to the adjustment UI.
    case adjusting(ArrowDetectionResult)
    case error(String)
}
Flow View
/// Top-level orchestrator for camera scoring: image source selection →
/// detection → adjustment → confirmation, driven by CameraScoringState.
struct CameraScoringFlow: View {
    let expectedArrowCount: Int
    let onScoresConfirmed: (ConfirmedArrowPlacements) -> Void
    let onCancel: () -> Void
    @StateObject private var detectionService = ArrowDetectionService()
    @StateObject private var adjustmentViewModel = ArrowAdjustmentViewModel()
    // Flow starts at source selection; .requestingPermission is entered
    // separately when camera permission has not yet been granted.
    @State private var state: CameraScoringState = .capturing
    @State private var showCamera = false
    @State private var showImagePicker = false
    var body: some View {
        NavigationView {
            Group {
                switch state {
                case .requestingPermission:
                    PermissionRequestView()
                case .capturing:
                    ImageSourceSelectionView(
                        onCameraSelected: { showCamera = true },
                        onPhotoLibrarySelected: { showImagePicker = true }
                    )
                case .processing:
                    ProcessingView()
                case .adjusting:
                    CameraScoringAdjustmentView(
                        viewModel: adjustmentViewModel,
                        onConfirm: onScoresConfirmed,
                        // Reset the ViewModel so a re-entry starts clean.
                        onCancel: { adjustmentViewModel.reset(); onCancel() }
                    )
                case .error(let message):
                    // NOTE(review): the extended ErrorView in the "Manual Entry
                    // Fallback" section also takes onManualEntry — confirm this
                    // call site matches the shipped initializer.
                    ErrorView(message: message, onRetry: { state = .capturing }, onCancel: onCancel)
                }
            }
            .navigationTitle("Camera Scoring")
        }
        .sheet(isPresented: $showCamera) {
            ImagePicker(sourceType: .camera) { image in
                handleSelectedImage(image)
            }
        }
        .sheet(isPresented: $showImagePicker) {
            ImagePicker(sourceType: .photoLibrary) { image in
                handleSelectedImage(image)
            }
        }
    }
    /// Kicks off async detection for the chosen image and advances the state
    /// machine. All state mutation hops back to the main actor.
    private func handleSelectedImage(_ image: UIImage?) {
        guard let image = image else {
            state = .error("No image selected")
            return
        }
        state = .processing
        Task {
            do {
                let result = try await detectionService.detectArrows(in: image)
                await MainActor.run {
                    adjustmentViewModel.initializeWithDetectionResult(result, expectedArrowCount: expectedArrowCount)
                    state = .adjusting(result)
                }
            } catch {
                await MainActor.run {
                    state = .error(error.localizedDescription)
                }
            }
        }
    }
}
Image Source Selection
Unlike Android, iOS offers both camera and photo library options:
/// Entry screen offering the two iOS image sources: live camera capture or
/// an existing photo from the library.
private struct ImageSourceSelectionView: View {
    let onCameraSelected: () -> Void
    let onPhotoLibrarySelected: () -> Void
    var body: some View {
        VStack(spacing: 24) {
            Text("Capture Your Target")
                .font(.title2)
                .fontWeight(.semibold)
            Text("Take a photo of your target or select one from your library")
                .font(.subheadline)
                .foregroundColor(.secondary)
            VStack(spacing: 16) {
                // Primary action: camera capture.
                Button(action: onCameraSelected) {
                    Label("Take Photo", systemImage: "camera.fill")
                        .frame(maxWidth: .infinity)
                }
                .buttonStyle(.borderedProminent)
                // Secondary action: pick an existing photo.
                Button(action: onPhotoLibrarySelected) {
                    Label("Choose from Library", systemImage: "photo.on.rectangle")
                        .frame(maxWidth: .infinity)
                }
                .buttonStyle(.bordered)
            }
            .padding(.horizontal, 32)
        }
    }
}
UIImagePickerController Bridge
/// SwiftUI bridge for UIImagePickerController, serving both the camera and
/// the photo library depending on `sourceType`. The callback receives nil
/// when the user cancels.
struct ImagePicker: UIViewControllerRepresentable {
    let sourceType: UIImagePickerController.SourceType
    let onImageSelected: (UIImage?) -> Void
    @Environment(\.dismiss) private var dismiss
    func makeUIViewController(context: Context) -> UIImagePickerController {
        let picker = UIImagePickerController()
        picker.sourceType = sourceType
        picker.delegate = context.coordinator
        return picker
    }
    func updateUIViewController(_ uiViewController: UIImagePickerController, context: Context) {}
    func makeCoordinator() -> Coordinator {
        Coordinator(self)
    }
    /// UIKit delegate that forwards picker results back into SwiftUI.
    class Coordinator: NSObject, UIImagePickerControllerDelegate, UINavigationControllerDelegate {
        let parent: ImagePicker
        init(_ parent: ImagePicker) {
            self.parent = parent
        }
        func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey: Any]) {
            // .originalImage is used (no editing UI is enabled on the picker).
            let image = info[.originalImage] as? UIImage
            parent.onImageSelected(image)
            parent.dismiss()
        }
        func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
            // Cancellation is reported as nil so the flow can surface an error.
            parent.onImageSelected(nil)
            parent.dismiss()
        }
    }
}
Adjustment UI
Location: Views/Camera/CameraScoringAdjustmentView.swift
Main View
/// Screen where the user reviews, drags, adds, and removes detected arrows
/// before confirming the scores.
struct CameraScoringAdjustmentView: View {
    @ObservedObject var viewModel: ArrowAdjustmentViewModel
    let onConfirm: (ConfirmedArrowPlacements) -> Void
    let onCancel: () -> Void
    var body: some View {
        VStack(spacing: 16) {
            // Score preview header
            ScorePreviewHeader(state: viewModel.state)
            // Warning banners
            if viewModel.state.showExtraArrowsWarning {
                ExtraArrowsWarning(extraCount: viewModel.state.extraCount)
            }
            if viewModel.state.showMissingArrowsBanner {
                MissingArrowsBanner(
                    missingCount: viewModel.state.missingCount,
                    isPlacementMode: viewModel.state.isPlacementMode,
                    onEnterPlacementMode: { viewModel.togglePlacementMode() }
                )
            }
            // Target face with arrows (arguments elided in this doc snippet)
            TargetFaceWithArrows(...)
                .aspectRatio(1, contentMode: .fit)
            // Action buttons
            HStack(spacing: 16) {
                Button(action: onCancel) {
                    Label("Cancel", systemImage: "xmark")
                        .frame(maxWidth: .infinity)
                }
                .buttonStyle(.bordered)
                // Confirm snapshots the current placements via the ViewModel.
                Button(action: { onConfirm(viewModel.confirmPlacements()) }) {
                    Label("Confirm", systemImage: "checkmark")
                        .frame(maxWidth: .infinity)
                }
                .buttonStyle(.borderedProminent)
            }
        }
        .padding()
    }
}
Target Face with SwiftUI
/// Renders the target face with draggable arrow markers layered on top and,
/// in placement mode, converts taps into normalized arrow positions.
private struct TargetFaceWithArrows: View {
    let arrows: [EditableArrow]
    let isPlacementMode: Bool
    let onArrowDrag: (UUID, Double, Double) -> Void
    let onArrowDragEnd: (UUID) -> Void
    let onTargetTap: (Double, Double) -> Void
    let getArrowConfidence: (UUID) -> Float
    var body: some View {
        GeometryReader { geometry in
            // Keep the target square inside whatever space we are given.
            let size = min(geometry.size.width, geometry.size.height)
            let center = CGPoint(x: size / 2, y: size / 2)
            let radius = size / 2
            ZStack {
                // Target rings
                TargetRingsView(size: size)
                // Placement mode overlay — subtle green tint as a visual cue.
                if isPlacementMode {
                    Circle()
                        .fill(Color.green.opacity(0.1))
                        .frame(width: size, height: size)
                }
                // Arrow markers - uses drag start position tracking (see Drag Gesture Fix Pattern)
                ForEach(arrows) { arrow in
                    DraggableArrowMarkerFixed(
                        arrow: arrow,
                        confidence: getArrowConfidence(arrow.id),
                        center: center,
                        radius: radius,
                        onPositionChange: { newX, newY in
                            onArrowDrag(arrow.id, newX, newY)
                        },
                        onDragEnd: { onArrowDragEnd(arrow.id) }
                    )
                }
            }
            .frame(width: size, height: size)
            .contentShape(Rectangle())
            // NOTE(review): the tap closure receiving a location requires the
            // iOS 16+ onTapGesture(count:coordinateSpace:perform:) overload —
            // confirm the deployment target.
            .onTapGesture { location in
                if isPlacementMode {
                    // Map view-space tap to normalized (-1…+1) coordinates.
                    let normalizedX = (location.x - center.x) / radius
                    let normalizedY = (location.y - center.y) / radius
                    onTargetTap(normalizedX, normalizedY)
                }
            }
        }
    }
}
Target Rings Rendering
/// Draws the 10 concentric scoring rings plus the X-ring dot.
private struct TargetRingsView: View {
    let size: CGFloat
    // Standard 10-ring colors - matches DomainColor
    // Index 0 is the OUTERMOST ring (Ring 1); see the [9 - index] lookup below.
    private let ringColors: [Color] = [
        .white, // Ring 1
        .white, // Ring 2
        .black, // Ring 3
        .black, // Ring 4
        Color(red: 0, green: 0.71, blue: 0.85), // Ring 5 - Blue (#00B4D8)
        Color(red: 0, green: 0.71, blue: 0.85), // Ring 6 - Blue
        .red, // Ring 7
        .red, // Ring 8
        Color(red: 1, green: 0.84, blue: 0), // Ring 9 - Gold (#FFD700)
        Color(red: 1, green: 0.84, blue: 0) // Ring 10 - Gold
    ]
    var body: some View {
        ZStack {
            // Draw from largest to smallest so inner rings paint on top.
            // index runs 9→0; the largest circle (index 9) takes ringColors[0].
            ForEach((0..<10).reversed(), id: \.self) { index in
                let ringRadius = size / 2 * CGFloat(index + 1) / 10
                Circle()
                    .fill(ringColors[9 - index])
                    .frame(width: ringRadius * 2, height: ringRadius * 2)
                // Ring boundary outline.
                Circle()
                    .stroke(Color.black, lineWidth: 1)
                    .frame(width: ringRadius * 2, height: ringRadius * 2)
            }
            // X-ring: 5% of the face diameter, matching the 0.05 scoring cutoff.
            Circle()
                .fill(Color(red: 1, green: 0.84, blue: 0))
                .frame(width: size * 0.05, height: size * 0.05)
            Circle()
                .stroke(Color.black, lineWidth: 1)
                .frame(width: size * 0.05, height: size * 0.05)
        }
    }
}
Draggable Arrow Marker
/// Arrow marker with a naive drag handler: onChanged forwards the RAW pixel
/// translation on every event. This is the drift-prone pattern described in
/// the "Drag Gesture Fix Pattern" section; the flow actually uses
/// DraggableArrowMarkerFixed.
private struct DraggableArrowMarker: View {
    let arrow: EditableArrow
    let confidence: Float
    let center: CGPoint
    let radius: CGFloat
    let onDrag: (Double, Double) -> Void
    let onDragEnd: () -> Void
    // Color encodes provenance and detection confidence.
    private var markerColor: Color {
        if arrow.isManuallyPlaced {
            return .green
        } else if confidence >= 0.7 {
            return .red
        } else if confidence >= 0.5 {
            return .orange
        } else {
            return Color(red: 1, green: 0.34, blue: 0.13) // Deep orange
        }
    }
    // Normalized (-1…+1) position mapped into view space.
    private var position: CGPoint {
        CGPoint(
            x: center.x + CGFloat(arrow.normalizedX) * radius,
            y: center.y + CGFloat(arrow.normalizedY) * radius
        )
    }
    var body: some View {
        ZStack {
            // Marker circle
            Circle()
                .fill(markerColor)
                .frame(width: 24, height: 24)
                .shadow(radius: 2)
            // Inner dot
            Circle()
                .fill(Color.white)
                .frame(width: 8, height: 8)
            // Score label — hidden once the arrow is dragged off the face.
            // (Requires calculateScore()/isOffTarget on EditableArrow.)
            if !arrow.isOffTarget {
                Text("\(arrow.calculateScore())")
                    .font(.caption2)
                    .fontWeight(.bold)
                    .offset(y: 20)
            }
        }
        .position(position)
        .gesture(
            DragGesture()
                .onChanged { value in
                    // value.translation is cumulative from drag start, in
                    // points — passing it as-is is what causes the drift.
                    onDrag(value.translation.width, value.translation.height)
                }
                .onEnded { _ in
                    onDragEnd()
                }
        )
    }
}
Critical: Drag Gesture Fix Pattern
Problem: SwiftUI’s DragGesture provides cumulative translation from the drag start. However, when you add the translation to arrow.normalizedX/Y on each change, you’re adding to an already-updated position, causing exponential drift and snap-back behavior.
Solution: Track the drag start position in @State and calculate the new position as startPosition + translation:
/// Corrected marker: records the arrow's position at drag start and derives
/// each new position as start + cumulative translation, avoiding the drift
/// caused by repeatedly adding the translation to an already-moved arrow.
private struct DraggableArrowMarkerFixed: View {
    let arrow: EditableArrow
    let confidence: Float
    let center: CGPoint
    let radius: CGFloat
    let onPositionChange: (Double, Double) -> Void
    let onDragEnd: () -> Void
    // Track position at drag start; nil when no drag is in progress.
    @State private var dragStartPosition: (x: Double, y: Double)?
    var body: some View {
        ZStack {
            // Marker UI... (same circles/label as DraggableArrowMarker)
        }
        // `position` computed property as in DraggableArrowMarker — elided here.
        .position(position)
        .gesture(
            DragGesture()
                .onChanged { value in
                    // Capture start position on first drag event
                    if dragStartPosition == nil {
                        dragStartPosition = (arrow.normalizedX, arrow.normalizedY)
                    }
                    guard let start = dragStartPosition else { return }
                    // Calculate new position from START + translation,
                    // converting the point translation to normalized units.
                    let newX = start.x + Double(value.translation.width) / Double(radius)
                    let newY = start.y + Double(value.translation.height) / Double(radius)
                    onPositionChange(newX, newY)
                }
                .onEnded { _ in
                    dragStartPosition = nil // Reset for next drag
                    onDragEnd()
                }
        )
    }
}
Key Difference from Android:
- Android uses delta-based updates (passes dx, dy to the ViewModel)
- iOS tracks the start position and calculates startPos + translation
Both solutions avoid the closure capture problem where the arrow position becomes stale during the drag gesture.
Adjustment ViewModel
Location: Views/Camera/CameraScoringAdjustmentView.swift
/// Drives the arrow-adjustment screen: holds editable arrow state, handles
/// drag/placement/removal, and produces the final confirmed placements.
class ArrowAdjustmentViewModel: ObservableObject {
    @Published var state = ArrowAdjustmentState()
    // Detected positions captured at initialization, used to decide whether
    // an arrow was adjusted by the user.
    private var originalPositions: [UUID: (Double, Double)] = [:]

    /// Seeds the state from a detection result. Enters placement mode
    /// immediately when fewer arrows were detected than expected.
    func initializeWithDetectionResult(_ result: ArrowDetectionResult, expectedArrowCount: Int) {
        // Fix: drop stale entries from any previous session before
        // repopulating — otherwise originalPositions grows across runs and
        // can collide with new arrows.
        originalPositions.removeAll()
        let editableArrows = result.arrows.map { arrow -> EditableArrow in
            let editable = EditableArrow.fromDetectedArrow(arrow)
            originalPositions[editable.id] = (editable.normalizedX, editable.normalizedY)
            return editable
        }
        state = ArrowAdjustmentState(
            arrows: editableArrows,
            expectedArrowCount: expectedArrowCount,
            isPlacementMode: editableArrows.count < expectedArrowCount
        )
    }

    /// Live drag update. Clamped to ±1.5 so an arrow can be dragged off the
    /// face (distance > 1.0) for removal without escaping the canvas.
    func updateArrowPosition(arrowId: UUID, newX: Double, newY: Double) {
        if let index = state.arrows.firstIndex(where: { $0.id == arrowId }) {
            state.arrows[index].normalizedX = min(1.5, max(-1.5, newX))
            state.arrows[index].normalizedY = min(1.5, max(-1.5, newY))
        }
    }

    /// Drag-end hook: arrows released beyond the outermost ring are removed.
    func finalizeArrowPosition(arrowId: UUID) {
        if let arrow = state.arrows.first(where: { $0.id == arrowId }),
           arrow.isOffTarget {
            removeArrow(arrowId: arrowId)
        }
    }

    /// Adds a user-placed arrow while in placement mode; exits the mode once
    /// the expected count is reached.
    func addManualArrow(normalizedX: Double, normalizedY: Double) {
        guard state.isPlacementMode else { return }
        let newArrow = EditableArrow.createManualPlacement(
            normalizedX: normalizedX,
            normalizedY: normalizedY
        )
        state.arrows.append(newArrow)
        let stillMissing = state.arrows.count < state.expectedArrowCount
        state.isPlacementMode = stillMissing
        state.showMissingArrowsBanner = stillMissing
    }

    /// Removes an arrow and re-enters placement mode if we are now short.
    func removeArrow(arrowId: UUID) {
        state.arrows.removeAll { $0.id == arrowId }
        state.showMissingArrowsBanner = state.arrows.count < state.expectedArrowCount
        state.isPlacementMode = state.arrows.count < state.expectedArrowCount
    }

    /// Builds the final result. `wasAdjusted` is true for manual placements
    /// or detections moved more than 0.01 normalized units from the original.
    /// (Relies on calculateScore()/isXRing being available on EditableArrow.)
    func confirmPlacements() -> ConfirmedArrowPlacements {
        let placements = state.arrows.map { arrow -> ConfirmedArrowPlacements.ArrowPlacementResult in
            let original = originalPositions[arrow.id]
            let wasAdjusted = arrow.isManuallyPlaced || (original.map { (origX, origY) in
                let dx = arrow.normalizedX - origX
                let dy = arrow.normalizedY - origY
                return sqrt(dx * dx + dy * dy) > 0.01
            } ?? false)
            return ConfirmedArrowPlacements.ArrowPlacementResult(
                normalizedX: arrow.normalizedX,
                normalizedY: arrow.normalizedY,
                score: arrow.calculateScore(),
                isX: arrow.isXRing,
                wasAdjusted: wasAdjusted
            )
        }
        return ConfirmedArrowPlacements(arrows: placements)
    }

    /// Clears all state so the flow can be restarted cleanly.
    func reset() {
        originalPositions.removeAll()
        state = ArrowAdjustmentState()
    }
}
Info.plist Configuration
Add camera and photo library usage descriptions:
<key>NSCameraUsageDescription</key>
<string>Camera access is needed to photograph your target for automatic arrow detection</string>
<key>NSPhotoLibraryUsageDescription</key>
<string>Photo library access allows you to select existing target photos for arrow detection</string>Integration with Scoring Views
ScoringView Integration
// In ScoringView.swift
// Local flag controlling the camera-scoring sheet.
@State private var showCameraScoring = false
// Tab selector or button
Button(action: { showCameraScoring = true }) {
    Label("Camera", systemImage: "camera.fill")
}
// Sheet presentation
.sheet(isPresented: $showCameraScoring) {
    CameraScoringFlow(
        expectedArrowCount: arrowsPerEnd,
        onScoresConfirmed: { placements in
            // Feed each confirmed placement into the scoring ViewModel.
            for arrow in placements.arrows {
                viewModel.addScore(arrow.score, isX: arrow.isX)
            }
            showCameraScoring = false
        },
        onCancel: { showCameraScoring = false }
    )
}
Manual Entry Fallback
When detection fails, users can manually place all arrows:
/// Detection-failure screen offering retry, fully-manual placement, or
/// cancellation.
private struct ErrorView: View {
    let message: String
    let onRetry: () -> Void
    let onManualEntry: () -> Void // NEW: Manual entry option
    let onCancel: () -> Void
    var body: some View {
        VStack(spacing: 16) {
            Image(systemName: "exclamationmark.triangle.fill")
                .font(.system(size: 50))
                .foregroundColor(.orange)
            Text("Detection Error")
                .font(.title2)
                .fontWeight(.semibold)
            // Localized description supplied by ArrowDetectionError.
            Text(message)
                .font(.subheadline)
                .foregroundColor(.secondary)
            VStack(spacing: 12) {
                Button("Try Again", action: onRetry)
                    .buttonStyle(.borderedProminent)
                // Manual entry - always visible on error
                Button("Enter Manually", action: onManualEntry)
                    .buttonStyle(.bordered)
                Button("Cancel", action: onCancel)
                    .foregroundColor(.secondary)
            }
        }
    }
}
The onManualEntry handler initializes the adjustment ViewModel for manual placement:
/// Sets up the adjustment screen with no detections, starting directly in
/// placement mode so the user can place every arrow by hand.
func initializeForManualEntry(expectedArrowCount: Int) {
    state = ArrowAdjustmentState(
        arrows: [], // No detected arrows
        expectedArrowCount: expectedArrowCount,
        isPlacementMode: true // Start in placement mode
    )
}
Error Handling
/// Errors surfaced by ArrowDetectionService. Conforms to LocalizedError so
/// the UI (ErrorView / CameraScoringFlow) can show errorDescription directly.
enum ArrowDetectionError: LocalizedError {
    case modelNotFound
    case modelLoadFailed(Error)
    case imageProcessingFailed
    case detectionFailed(Error)
    case invalidImage
    var errorDescription: String? {
        switch self {
        case .modelNotFound:
            return "Arrow detection model not found"
        case .modelLoadFailed(let error):
            return "Failed to load model: \(error.localizedDescription)"
        case .imageProcessingFailed:
            return "Failed to process image"
        case .detectionFailed(let error):
            return "Detection failed: \(error.localizedDescription)"
        case .invalidImage:
            return "Invalid image provided"
        }
    }
}
Testing
Unit Tests
import XCTest
/// Unit tests for the scoring math and adjustment ViewModel behavior.
class ArrowDetectionTests: XCTestCase {
    /// distanceFromCenter 0.03 falls inside the X-ring cutoff (<= 0.05):
    /// scores 10 and counts as an X.
    func testScoreCalculation() {
        let arrow = DetectedArrow(
            id: 1,
            normalizedX: 0,
            normalizedY: 0.03,
            pixelX: 500,
            pixelY: 503,
            distanceFromCenter: 0.03,
            clockPositionDeg: 90,
            confidence: 0.85,
            boundingBox: BoundingBox(x: 480, y: 483, width: 40, height: 40)
        )
        XCTAssertEqual(arrow.calculateScore(), 10)
        XCTAssertTrue(arrow.isXRing)
    }
    /// 3-4-5 triangle: distance from center should be exactly 0.5.
    func testEditableArrowDistanceCalculation() {
        let arrow = EditableArrow(
            normalizedX: 0.3,
            normalizedY: 0.4,
            isManuallyPlaced: false,
            originalConfidence: 0.9
        )
        let distance = arrow.distanceFromCenter()
        XCTAssertEqual(distance, 0.5, accuracy: 0.001)
    }
    /// Dragging an arrow to (1.5, 1.5) puts it off-target (distance > 1.0),
    /// so finalizing the drag must remove it.
    func testArrowRemovalOnDragOffTarget() {
        let viewModel = ArrowAdjustmentViewModel()
        // Initialize with test data
        // ...
        let arrowId = viewModel.state.arrows[0].id
        viewModel.updateArrowPosition(arrowId: arrowId, newX: 1.5, newY: 1.5)
        viewModel.finalizeArrowPosition(arrowId: arrowId)
        XCTAssertFalse(viewModel.state.arrows.contains { $0.id == arrowId })
    }
}
UI Tests
/// UI smoke test: the scoring view exposes the camera entry point and
/// tapping it reaches the image-source selection screen.
func testCameraScoringFlowNavigation() {
    let app = XCUIApplication()
    app.launch()
    // Navigate to scoring view
    app.buttons["Score"].tap()
    // Open camera scoring
    app.buttons["Camera"].tap()
    // Verify image source selection appears
    XCTAssertTrue(app.staticTexts["Capture Your Target"].exists)
    XCTAssertTrue(app.buttons["Take Photo"].exists)
    XCTAssertTrue(app.buttons["Choose from Library"].exists)
}
Platform Differences vs Android
| Aspect | iOS | Android |
|---|---|---|
| ML Runtime | CoreML + Vision | TensorFlow Lite |
| Image Source | Camera + Photo Library | Camera only |
| Picker | UIImagePickerController | CameraX TakePicture |
| UI Framework | SwiftUI | Jetpack Compose |
| Arrow ID | UUID | Int |
| Coordinate Type | Double | Float |
Related Documentation
- Camera Scoring Overview
- Android Camera Scoring Guide
- Visual Scoring Guide - Related target face rendering
- KMP iOS Patterns - Swift-Kotlin interop patterns
Last Updated: 2025-12-04 PR: #354