Core ML, Apple'ın on-device machine learning framework'üdür. Kullanıcı gizliliğini koruyarak, internet bağlantısı gerektirmeden güçlü ML özellikleri sunar. iPhone'un Neural Engine'i ile milisaniyeler içinde sonuç alırsınız. Bu rehberde Core ML'i production uygulamalarına entegre etmeyi A'dan Z'ye öğreneceksiniz.
İçindekiler
- On-Device ML Avantajları
- Core ML Model Entegrasyonu
- Vision Framework ile Görüntü Analizi
- Natural Language Processing
- Model Performans Optimizasyonu
- SwiftUI Entegrasyonu
- Sonuç ve Öneriler
On-Device ML Avantajları
| Özellik | Cloud ML | On-Device (Core ML) |
|---|---|---|
| Latency | 100-500ms | 10-50ms |
| Privacy | ❌ Veri sunucuya gider | ✅ Veri cihazda kalır |
| Offline | ❌ Internet gerekli | ✅ Her zaman çalışır |
| Cost | 💰 API maliyeti | 💚 Ücretsiz |
💡 Pro Tip: Core ML otomatik olarak en uygun compute unit'i (CPU/GPU/Neural Engine) seçer. `MLModelConfiguration.computeUnits` ile manuel kontrol edebilirsiniz.

Core ML Model Entegrasyonu
Model Dosyası Ekleme
swift
1// 1. .mlmodel dosyasını projeye ekleyin2// Xcode otomatik olarak Swift sınıfı oluşturur3 4import CoreML5 6class ImageClassifier {7 private let model: VNCoreMLModel8 9 init() throws {10 let config = MLModelConfiguration()11 config.computeUnits = .all // CPU, GPU, Neural Engine12 13 let mlModel = try MobileNetV2(configuration: config).model14 model = try VNCoreMLModel(for: mlModel)15 }16 17 func classify(image: UIImage) async throws -> [Classification] {18 guard let ciImage = CIImage(image: image) else {19 throw ClassificationError.invalidImage20 }21 22 return try await withCheckedThrowingContinuation { continuation in23 let request = VNCoreMLRequest(model: model) { request, error in24 if let error = error {25 continuation.resume(throwing: error)26 return27 }28 29 let results = request.results as? [VNClassificationObservation] ?? []30 let classifications = results.prefix(5).map {31 Classification(label: $0.identifier, confidence: $0.confidence)32 }33 continuation.resume(returning: classifications)34 }35 36 request.imageCropAndScaleOption = .centerCrop37 38 let handler = VNImageRequestHandler(ciImage: ciImage, options: [:])39 try? handler.perform([request])40 }41 }42}43 44struct Classification {45 let label: String46 let confidence: Float47}Vision Framework ile Görüntü Analizi
Yüz Tespiti
swift
1class FaceDetector {2 func detectFaces(in image: UIImage) async throws -> [DetectedFace] {3 guard let ciImage = CIImage(image: image) else {4 throw DetectionError.invalidImage5 }6 7 return try await withCheckedThrowingContinuation { continuation in8 let request = VNDetectFaceLandmarksRequest { request, error in9 if let error = error {10 continuation.resume(throwing: error)11 return12 }13 14 let faces = (request.results as? [VNFaceObservation] ?? []).map { observation in15 DetectedFace(16 boundingBox: observation.boundingBox,17 landmarks: observation.landmarks,18 roll: observation.roll?.floatValue,19 yaw: observation.yaw?.floatValue20 )21 }22 continuation.resume(returning: faces)23 }24 25 let handler = VNImageRequestHandler(ciImage: ciImage, options: [:])26 try? handler.perform([request])27 }28 }29}30 31struct DetectedFace {32 let boundingBox: CGRect33 let landmarks: VNFaceLandmarks2D?34 let roll: Float?35 let yaw: Float?36}Nesne Tespiti
swift
1class ObjectDetector {2 private let model: VNCoreMLModel3 4 init() throws {5 let mlModel = try YOLOv3(configuration: MLModelConfiguration()).model6 model = try VNCoreMLModel(for: mlModel)7 }8 9 func detect(in image: UIImage) async throws -> [DetectedObject] {10 guard let ciImage = CIImage(image: image) else {11 throw DetectionError.invalidImage12 }13 14 return try await withCheckedThrowingContinuation { continuation in15 let request = VNCoreMLRequest(model: model) { request, error in16 if let error = error {17 continuation.resume(throwing: error)18 return19 }20 21 let results = request.results as? [VNRecognizedObjectObservation] ?? []22 let objects = results.map { observation in23 DetectedObject(24 label: observation.labels.first?.identifier ?? "Unknown",25 confidence: observation.confidence,26 boundingBox: observation.boundingBox27 )28 }29 continuation.resume(returning: objects)30 }31 32 let handler = VNImageRequestHandler(ciImage: ciImage, options: [:])33 try? handler.perform([request])34 }35 }36}Natural Language Processing
swift
1import NaturalLanguage2 3class TextAnalyzer {4 // Duygu Analizi5 func analyzeSentiment(_ text: String) -> Double? {6 let tagger = NLTagger(tagSchemes: [.sentimentScore])7 tagger.string = text8 9 let (sentiment, _) = tagger.tag(at: text.startIndex, unit: .paragraph, scheme: .sentimentScore)10 return sentiment.flatMap { Double($0.rawValue) }11 }12 13 // Dil Tespiti14 func detectLanguage(_ text: String) -> NLLanguage? {15 let recognizer = NLLanguageRecognizer()16 recognizer.processString(text)17 return recognizer.dominantLanguage18 }19 20 // Named Entity Recognition21 func extractEntities(_ text: String) -> [Entity] {22 let tagger = NLTagger(tagSchemes: [.nameType])23 tagger.string = text24 25 var entities: [Entity] = []26 27 tagger.enumerateTags(in: text.startIndex..<text.endIndex, unit: .word, scheme: .nameType) { tag, range in28 if let tag = tag {29 entities.append(Entity(30 text: String(text[range]),31 type: EntityType(from: tag)32 ))33 }34 return true35 }36 37 return entities38 }39 40 // Metin Benzerliği41 func similarity(between text1: String, and text2: String) -> Double {42 let embedding = NLEmbedding.wordEmbedding(for: .english)43 44 guard let vec1 = embedding?.vector(for: text1),45 let vec2 = embedding?.vector(for: text2) else {46 return 047 }48 49 return cosineSimilarity(vec1, vec2)50 }51 52 private func cosineSimilarity(_ a: [Double], _ b: [Double]) -> Double {53 let dotProduct = zip(a, b).map(*).reduce(0, +)54 let magnitudeA = sqrt(a.map { $0 * $0 }.reduce(0, +))55 let magnitudeB = sqrt(b.map { $0 * $0 }.reduce(0, +))56 return dotProduct / (magnitudeA * magnitudeB)57 }58}Model Performans Optimizasyonu
swift
1// Model Quantization2let config = MLModelConfiguration()3config.computeUnits = .cpuAndNeuralEngine // Neural Engine kullan4 5// Model Compilation (ilk kullanımda)6let compiledModelURL = try MLModel.compileModel(at: modelURL)7let model = try MLModel(contentsOf: compiledModelURL, configuration: config)8 9// Batch Prediction10func batchPredict(images: [UIImage]) async throws -> [[Classification]] {11 let featureProvider = try images.map { image -> MLFeatureProvider in12 // Convert to MLFeatureValue13 }14 15 let batchProvider = MLArrayBatchProvider(array: featureProvider)16 let results = try model.predictions(fromBatch: batchProvider)17 18 // Process results19}SwiftUI Entegrasyonu
swift
1struct CameraMLView: View {2 @StateObject private var viewModel = CameraMLViewModel()3 4 var body: some View {5 ZStack {6 CameraPreview(session: viewModel.session)7 8 // Detection overlays9 ForEach(viewModel.detectedObjects) { object in10 Rectangle()11 .stroke(Color.green, lineWidth: 2)12 .frame(13 width: object.boundingBox.width * UIScreen.main.bounds.width,14 height: object.boundingBox.height * UIScreen.main.bounds.height15 )16 .position(17 x: object.boundingBox.midX * UIScreen.main.bounds.width,18 y: (1 - object.boundingBox.midY) * UIScreen.main.bounds.height19 )20 .overlay(21 Text("\(object.label) \(Int(object.confidence * 100))%")22 .font(.caption)23 .foregroundColor(.white)24 .padding(4)25 .background(Color.green)26 )27 }28 }29 .onAppear {30 viewModel.startSession()31 }32 }33}Model Performans Optimizasyonu
swift
1// Model Configuration2let config = MLModelConfiguration()3config.computeUnits = .cpuAndNeuralEngine // ANE'yi zorla4config.allowLowPrecisionAccumulationOnGPU = true // FP16 kullan5 6// Model Compilation (ilk kullanımda)7let compiledModelURL = try MLModel.compileModel(at: modelURL)8let model = try MLModel(contentsOf: compiledModelURL, configuration: config)9 10// Batch Prediction (performans artışı)11func batchPredict(images: [UIImage]) async throws -> [[Classification]] {12 try await withThrowingTaskGroup(of: (Int, [Classification]).self) { group in13 for (index, image) in images.enumerated() {14 group.addTask { (index, try await self.classify(image: image)) }15 }16 var results = [[Classification]](repeating: [], count: images.count)17 for try await (index, classifications) in group {18 results[index] = classifications19 }20 return results21 }22}Okuyucu Ödülü
Tebrikler! Bu yazıyı sonuna kadar okuduğun için sana özel bir hediyem var:
ALTIN İPUCU
Bu yazının en değerli bilgisi
Bu ipucu, yazının en önemli çıkarımını içeriyor.
Sonuç ve Öneriler
Key Takeaways
- ✅ On-device ML - Gizlilik ve hız avantajı
- ✅ Vision Framework - Görüntü analizi için optimize
- ✅ NLP Framework - Metin analizi kolay
- ✅ Create ML - Custom model eğitimi
- ✅ Neural Engine - Maksimum performans
Kaynaklar
Easter Egg
Gizli bir bilgi buldun!
Bu bölümde gizli bir bilgi var. Keşfetmek ister misin?

