aves_mio1/lib/model/db/db_sqflite.dart.ok
FabioMich66 084fa184da
Some checks failed
Quality check / Flutter analysis (push) Has been cancelled
Quality check / CodeQL analysis (java-kotlin) (push) Has been cancelled
ok con video e foto in galleria aves
2026-03-17 12:19:38 +01:00

791 lines
26 KiB
Text
Raw Permalink Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

// lib/model/db/db_sqflite.dart
import 'dart:io';
import 'package:aves/model/covers.dart';
import 'package:aves/model/db/db.dart';
import 'package:aves/model/db/db_sqflite_schema.dart';
import 'package:aves/model/db/db_sqflite_upgrade.dart';
import 'package:aves/model/dynamic_albums.dart';
import 'package:aves/model/entry/entry.dart';
import 'package:aves/model/favourites.dart';
import 'package:aves/model/filters/filters.dart';
import 'package:aves/model/metadata/address.dart';
import 'package:aves/model/metadata/catalog.dart';
import 'package:aves/model/metadata/trash.dart';
import 'package:aves/model/vaults/details.dart';
import 'package:aves/model/viewer/video_playback.dart';
import 'package:aves/services/common/services.dart';
import 'package:collection/collection.dart';
import 'package:flutter/foundation.dart';
import 'package:sqflite/sqflite.dart';
// --- REMOVED: remote-sync import from the DB layer ---
// import 'package:aves/remote/run_remote_sync.dart' as remote;
/// Sqflite-backed implementation of [LocalMediaDb].
///
/// Persists entries, catalog metadata, addresses, favourites, covers,
/// dynamic albums, vaults, trash details and video playback state in a
/// single SQLite database file (`metadata.db`).
class SqfliteLocalMediaDb implements LocalMediaDb {
  // Opened in [init]; reading it before [init] completes throws.
  late Database _db;

  // Exposes the native connection opened by the loader.
  @override
  Database get rawDb => _db;

  /// Absolute path of the database file on this device.
  @override
  Future<String> get path async => pContext.join(await getDatabasesPath(), 'metadata.db');

  // Table name aliases, shared with the schema/upgrade helpers.
  static const entryTable = SqfliteLocalMediaDbSchema.entryTable;
  static const dateTakenTable = SqfliteLocalMediaDbSchema.dateTakenTable;
  static const metadataTable = SqfliteLocalMediaDbSchema.metadataTable;
  static const addressTable = SqfliteLocalMediaDbSchema.addressTable;
  static const favouriteTable = SqfliteLocalMediaDbSchema.favouriteTable;
  static const coverTable = SqfliteLocalMediaDbSchema.coverTable;
  static const dynamicAlbumTable = SqfliteLocalMediaDbSchema.dynamicAlbumTable;
  static const vaultTable = SqfliteLocalMediaDbSchema.vaultTable;
  static const trashTable = SqfliteLocalMediaDbSchema.trashTable;
  static const videoPlaybackTable = SqfliteLocalMediaDbSchema.videoPlaybackTable;

  static const _entryInsertSliceMaxCount = 10000; // number of entries
  static const _queryCursorBufferSize = 1000; // number of rows

  // Highest entry ID handed out so far; seeded from MAX(id) in [init].
  static int _lastId = 0;

  /// The next unused entry ID (monotonically increasing).
  @override
  int get nextId => ++_lastId;
  /// Opens the database (creating or upgrading the schema as needed up to
  /// version 15), applies the remote-columns guard migration, and seeds
  /// [nextId] from the highest stored entry ID.
  @override
  Future<void> init() async {
    _db = await openDatabase(
      await path,
      onCreate: (db, version) => SqfliteLocalMediaDbSchema.createLatestVersion(db),
      onUpgrade: LocalMediaDbUpgrader.upgradeDb,
      version: 15,
    );
    // Service log: path of the DB used by the loader.
    try {
      final dbPath = await path;
      debugPrint('[localMediaDb] opened db path=$dbPath');
    } catch (_) {}
    // Idempotent guard: ensures columns/indices needed by remote entries,
    // even on databases created by older schema versions.
    await _ensureRemoteColumns(_db);
    final maxIdRows = await _db.rawQuery('SELECT MAX(id) AS maxId FROM $entryTable');
    _lastId = (maxIdRows.firstOrNull?['maxId'] as int?) ?? 0;
    // REMOVED: debug-mode remote sync trigger.
    // Sync now starts ONLY from HomePage (rrs.runRemoteSyncOnceManaged()) after loading.
  }
@override
Future<int> dbFileSize() async {
final file = File(await path);
return await file.exists() ? await file.length() : 0;
}
  /// Closes the connection, deletes the database file, then re-creates a
  /// fresh database via [init]. All persisted data is lost.
  @override
  Future<void> reset() async {
    debugPrint('$runtimeType reset');
    await _db.close();
    await deleteDatabase(await path);
    await init();
  }
@override
Future<void> removeIds(Set<int> ids, {Set<EntryDataType>? dataTypes}) async {
if (ids.isEmpty) return;
final _dataTypes = dataTypes ?? EntryDataType.values.toSet();
final batch = _db.batch();
const where = 'id = ?';
const coverWhere = 'entryId = ?';
ids.forEach((id) {
final whereArgs = [id];
if (_dataTypes.contains(EntryDataType.basic)) {
batch.delete(entryTable, where: where, whereArgs: whereArgs);
}
if (_dataTypes.contains(EntryDataType.catalog)) {
batch.delete(dateTakenTable, where: where, whereArgs: whereArgs);
batch.delete(metadataTable, where: where, whereArgs: whereArgs);
}
if (_dataTypes.contains(EntryDataType.address)) {
batch.delete(addressTable, where: where, whereArgs: whereArgs);
}
if (_dataTypes.contains(EntryDataType.references)) {
batch.delete(favouriteTable, where: where, whereArgs: whereArgs);
batch.delete(coverTable, where: coverWhere, whereArgs: whereArgs);
batch.delete(trashTable, where: where, whereArgs: whereArgs);
batch.delete(videoPlaybackTable, where: where, whereArgs: whereArgs);
}
});
await batch.commit(noResult: true);
}
// entries
@override
Future<void> clearEntries() async {
final count = await _db.delete(entryTable, where: '1');
debugPrint('$runtimeType clearEntries deleted $count rows');
}
  /// Loads entries, optionally filtered by [origin] and/or by folder
  /// [directory] (direct children only — entries in subfolders are skipped).
  @override
  Future<Set<AvesEntry>> loadEntries({int? origin, String? directory}) async {
    // Dynamic WHERE clause construction.
    String? where;
    final whereArgs = <Object?>[];
    if (origin != null) {
      where = 'origin = ?';
      whereArgs.add(origin);
    }
    // Diagnostics: log the logical input parameters.
    debugPrint('[localMediaDb] loadEntries(origin=$origin, directory=$directory)');
    final entries = <AvesEntry>{};
    if (directory != null) {
      final separator = pContext.separator;
      if (!directory.endsWith(separator)) {
        directory = '$directory$separator';
      }
      // The LIKE prefix match also returns subfolder rows; they are filtered
      // out below by checking for a separator after the directory prefix.
      where = '${where != null ? '$where AND ' : ''}path LIKE ?';
      whereArgs.add('$directory%');
      debugPrint('[localMediaDb] SQL(queryCursor, by directory) WHERE="$where" args=$whereArgs');
      final cursor = await _db.queryCursor(entryTable, where: where, whereArgs: whereArgs, bufferSize: _queryCursorBufferSize);
      final dirLength = directory.length;
      var readRows = 0;
      while (await cursor.moveNext()) {
        final row = cursor.current;
        readRows++;
        // skip entries in subfolders
        final path = row['path'] as String?;
        if (path != null && !path.substring(dirLength).contains(separator)) {
          entries.add(AvesEntry.fromMap(row));
        }
      }
      debugPrint('[localMediaDb] rows read=$readRows kept=${entries.length} (directory filter)');
    } else {
      debugPrint('[localMediaDb] SQL(queryCursor) WHERE="${where ?? '—'}" args=$whereArgs');
      final cursor = await _db.queryCursor(entryTable, where: where, whereArgs: whereArgs, bufferSize: _queryCursorBufferSize);
      var readRows = 0;
      while (await cursor.moveNext()) {
        readRows++;
        entries.add(AvesEntry.fromMap(cursor.current));
      }
      debugPrint('[localMediaDb] rows read=$readRows kept=${entries.length}');
      // Diagnostics-only fallback: when querying remote entries (origin==1)
      // yields nothing, check whether the table actually holds origin=1 rows.
      if ((origin == 1) && entries.isEmpty) {
        try {
          final countRows = await _db.rawQuery('SELECT COUNT(*) AS c FROM $entryTable WHERE origin=1');
          final c = (countRows.firstOrNull?['c'] as int?) ?? 0;
          debugPrint('[localMediaDb][diag] check COUNT(origin=1)=$c');
          if (c > 0) {
            final sample = await _db.rawQuery(
              'SELECT id, remoteId, provider, uri, trashed, dateModifiedMillis '
              'FROM $entryTable WHERE origin=1 ORDER BY id DESC LIMIT 5',
            );
            debugPrint('[localMediaDb][diag] sample origin=1 = $sample');
          }
        } catch (e, st) {
          debugPrint('[localMediaDb][diag] fallback check failed: $e\n$st');
        }
      }
    }
    return entries;
  }
@override
Future<Set<AvesEntry>> loadEntriesById(Set<int> ids) => _getByIds(ids, entryTable, AvesEntry.fromMap);
@override
Future<void> insertEntries(Set<AvesEntry> entries) async {
if (entries.isEmpty) return;
final stopwatch = Stopwatch()..start();
int inserted = 0;
await Future.forEach(entries.slices(_entryInsertSliceMaxCount), (slice) async {
debugPrint('$runtimeType saveEntries inserting slice of [${inserted + 1}, ${inserted + slice.length}] entries');
final batch = _db.batch();
slice.forEach((entry) => _batchInsertEntry(batch, entry));
await batch.commit(noResult: true);
inserted += slice.length;
});
debugPrint('$runtimeType saveEntries complete in ${stopwatch.elapsed.inMilliseconds}ms for ${entries.length} entries');
}
@override
Future<void> updateEntry(int id, AvesEntry entry) async {
final batch = _db.batch();
batch.delete(entryTable, where: 'id = ?', whereArgs: [id]);
_batchInsertEntry(batch, entry);
await batch.commit(noResult: true);
}
void _batchInsertEntry(Batch batch, AvesEntry entry) {
batch.insert(
entryTable,
entry.toDatabaseMap(),
conflictAlgorithm: ConflictAlgorithm.replace,
);
}
@override
Future<Set<AvesEntry>> searchLiveEntries(String query, {int? limit}) async {
final rows = await _db.query(
entryTable,
where: '(title LIKE ? OR path LIKE ?) AND trashed = ?',
whereArgs: ['%$query%', '%$query%', 0],
orderBy: 'sourceDateTakenMillis DESC',
limit: limit,
);
return rows.map(AvesEntry.fromMap).toSet();
}
@override
Future<Set<AvesEntry>> searchLiveDuplicates(int origin, Set<AvesEntry>? entries) async {
String where = 'origin = ? AND trashed = ?';
if (entries != null) {
where += ' AND contentId IN (${entries.map((v) => v.contentId).join(',')})';
}
final rows = await _db.rawQuery(
'SELECT *, MAX(id) AS id'
' FROM $entryTable'
' WHERE $where'
' GROUP BY contentId'
' HAVING COUNT(id) > 1',
[origin, 0],
);
final duplicates = rows.map(AvesEntry.fromMap).toSet();
if (duplicates.isNotEmpty) {
debugPrint('$runtimeType found duplicates=$duplicates');
}
return duplicates;
}
// date taken
@override
Future<void> clearDates() async {
final count = await _db.delete(dateTakenTable, where: '1');
debugPrint('$runtimeType clearDates deleted $count rows');
}
@override
Future<Map<int?, int?>> loadDates() async {
final result = <int?, int?>{};
final cursor = await _db.queryCursor(dateTakenTable, bufferSize: _queryCursorBufferSize);
while (await cursor.moveNext()) {
final row = cursor.current;
result[row['id'] as int] = row['dateMillis'] as int? ?? 0;
}
return result;
}
// catalog metadata
@override
Future<void> clearCatalogMetadata() async {
final count = await _db.delete(metadataTable, where: '1');
debugPrint('$runtimeType clearMetadataEntries deleted $count rows');
}
@override
Future<Set<CatalogMetadata>> loadCatalogMetadata() async {
final result = <CatalogMetadata>{};
final cursor = await _db.queryCursor(metadataTable, bufferSize: _queryCursorBufferSize);
while (await cursor.moveNext()) {
result.add(CatalogMetadata.fromMap(cursor.current));
}
return result;
}
@override
Future<Set<CatalogMetadata>> loadCatalogMetadataById(Set<int> ids) => _getByIds(ids, metadataTable, CatalogMetadata.fromMap);
@override
Future<void> saveCatalogMetadata(Set<CatalogMetadata> metadataEntries) async {
if (metadataEntries.isEmpty) return;
final stopwatch = Stopwatch()..start();
try {
final batch = _db.batch();
metadataEntries.forEach((metadata) => _batchInsertMetadata(batch, metadata));
await batch.commit(noResult: true);
debugPrint('$runtimeType saveMetadata complete in ${stopwatch.elapsed.inMilliseconds}ms for ${metadataEntries.length} entries');
} catch (error, stack) {
debugPrint('$runtimeType failed to save metadata with error=$error\n$stack');
}
}
@override
Future<void> updateCatalogMetadata(int id, CatalogMetadata? metadata) async {
final batch = _db.batch();
batch.delete(dateTakenTable, where: 'id = ?', whereArgs: [id]);
batch.delete(metadataTable, where: 'id = ?', whereArgs: [id]);
_batchInsertMetadata(batch, metadata);
await batch.commit(noResult: true);
}
void _batchInsertMetadata(Batch batch, CatalogMetadata? metadata) {
if (metadata == null) return;
if (metadata.dateMillis != 0) {
batch.insert(
dateTakenTable,
{
'id': metadata.id,
'dateMillis': metadata.dateMillis,
},
conflictAlgorithm: ConflictAlgorithm.replace,
);
}
batch.insert(
metadataTable,
metadata.toMap(),
conflictAlgorithm: ConflictAlgorithm.replace,
);
}
// address
@override
Future<void> clearAddresses() async {
final count = await _db.delete(addressTable, where: '1');
debugPrint('$runtimeType clearAddresses deleted $count rows');
}
@override
Future<Set<AddressDetails>> loadAddresses() async {
final result = <AddressDetails>{};
final cursor = await _db.queryCursor(addressTable, bufferSize: _queryCursorBufferSize);
while (await cursor.moveNext()) {
result.add(AddressDetails.fromMap(cursor.current));
}
return result;
}
@override
Future<Set<AddressDetails>> loadAddressesById(Set<int> ids) => _getByIds(ids, addressTable, AddressDetails.fromMap);
@override
Future<void> saveAddresses(Set<AddressDetails> addresses) async {
if (addresses.isEmpty) return;
final stopwatch = Stopwatch()..start();
final batch = _db.batch();
addresses.forEach((address) => _batchInsertAddress(batch, address));
await batch.commit(noResult: true);
debugPrint('$runtimeType saveAddresses complete in ${stopwatch.elapsed.inMilliseconds}ms for ${addresses.length} entries');
}
@override
Future<void> updateAddress(int id, AddressDetails? address) async {
final batch = _db.batch();
batch.delete(addressTable, where: 'id = ?', whereArgs: [id]);
_batchInsertAddress(batch, address);
await batch.commit(noResult: true);
}
void _batchInsertAddress(Batch batch, AddressDetails? address) {
if (address == null) return;
batch.insert(
addressTable,
address.toMap(),
conflictAlgorithm: ConflictAlgorithm.replace,
);
}
// vaults
@override
Future<void> clearVaults() async {
final count = await _db.delete(vaultTable, where: '1');
debugPrint('$runtimeType clearVaults deleted $count rows');
}
@override
Future<Set<VaultDetails>> loadAllVaults() async {
final result = <VaultDetails>{};
final cursor = await _db.queryCursor(vaultTable, bufferSize: _queryCursorBufferSize);
while (await cursor.moveNext()) {
result.add(VaultDetails.fromMap(cursor.current));
}
return result;
}
@override
Future<void> addVaults(Set<VaultDetails> rows) async {
if (rows.isEmpty) return;
final batch = _db.batch();
rows.forEach((row) => _batchInsertVault(batch, row));
await batch.commit(noResult: true);
}
@override
Future<void> updateVault(String oldName, VaultDetails row) async {
final batch = _db.batch();
batch.delete(vaultTable, where: 'name = ?', whereArgs: [oldName]);
_batchInsertVault(batch, row);
await batch.commit(noResult: true);
}
void _batchInsertVault(Batch batch, VaultDetails row) {
batch.insert(
vaultTable,
row.toMap(),
conflictAlgorithm: ConflictAlgorithm.replace,
);
}
@override
Future<void> removeVaults(Set<VaultDetails> rows) async {
if (rows.isEmpty) return;
final batch = _db.batch();
rows.map((v) => v.name).forEach((name) => batch.delete(vaultTable, where: 'name = ?', whereArgs: [name]));
await batch.commit(noResult: true);
}
// trash
@override
Future<void> clearTrashDetails() async {
final count = await _db.delete(trashTable, where: '1');
debugPrint('$runtimeType clearTrashDetails deleted $count rows');
}
@override
Future<Set<TrashDetails>> loadAllTrashDetails() async {
final result = <TrashDetails>{};
final cursor = await _db.queryCursor(trashTable, bufferSize: _queryCursorBufferSize);
while (await cursor.moveNext()) {
result.add(TrashDetails.fromMap(cursor.current));
}
return result;
}
@override
Future<void> updateTrash(int id, TrashDetails? details) async {
final batch = _db.batch();
batch.delete(trashTable, where: 'id = ?', whereArgs: [id]);
_batchInsertTrashDetails(batch, details);
await batch.commit(noResult: true);
}
void _batchInsertTrashDetails(Batch batch, TrashDetails? details) {
if (details == null) return;
batch.insert(
trashTable,
details.toMap(),
conflictAlgorithm: ConflictAlgorithm.replace,
);
}
// favourites
@override
Future<void> clearFavourites() async {
final count = await _db.delete(favouriteTable, where: '1');
debugPrint('$runtimeType clearFavourites deleted $count rows');
}
@override
Future<Set<FavouriteRow>> loadAllFavourites() async {
final result = <FavouriteRow>{};
final cursor = await _db.queryCursor(favouriteTable, bufferSize: _queryCursorBufferSize);
while (await cursor.moveNext()) {
result.add(FavouriteRow.fromMap(cursor.current));
}
return result;
}
@override
Future<void> addFavourites(Set<FavouriteRow> rows) async {
if (rows.isEmpty) return;
final batch = _db.batch();
rows.forEach((row) => _batchInsertFavourite(batch, row));
await batch.commit(noResult: true);
}
@override
Future<void> updateFavouriteId(int id, FavouriteRow row) async {
final batch = _db.batch();
batch.delete(favouriteTable, where: 'id = ?', whereArgs: [id]);
_batchInsertFavourite(batch, row);
await batch.commit(noResult: true);
}
void _batchInsertFavourite(Batch batch, FavouriteRow row) {
batch.insert(
favouriteTable,
row.toMap(),
conflictAlgorithm: ConflictAlgorithm.replace,
);
}
@override
Future<void> removeFavourites(Set<FavouriteRow> rows) async {
if (rows.isEmpty) return;
final ids = rows.map((row) => row.entryId).whereType<int>().toSet();
if (ids.isEmpty) return;
final batch = _db.batch();
for (final id in ids) {
batch.delete(favouriteTable, where: 'id = ?', whereArgs: [id]);
}
await batch.commit(noResult: true);
}
// covers
@override
Future<void> clearCovers() async {
final count = await _db.delete(coverTable, where: '1');
debugPrint('$runtimeType clearCovers deleted $count rows');
}
@override
Future<Set<CoverRow>> loadAllCovers() async {
final result = <CoverRow>{};
final cursor = await _db.queryCursor(coverTable, bufferSize: _queryCursorBufferSize);
while (await cursor.moveNext()) {
final rowMap = cursor.current;
final row = CoverRow.fromMap(rowMap);
if (row != null) {
result.add(row);
} else {
debugPrint('$runtimeType failed to deserialize cover from row=$rowMap');
}
}
return result;
}
@override
Future<void> addCovers(Set<CoverRow> rows) async {
if (rows.isEmpty) return;
final batch = _db.batch();
rows.forEach((row) => _batchInsertCover(batch, row));
await batch.commit(noResult: true);
}
@override
Future<void> updateCoverEntryId(int id, CoverRow row) async {
final batch = _db.batch();
batch.delete(coverTable, where: 'entryId = ?', whereArgs: [id]);
_batchInsertCover(batch, row);
await batch.commit(noResult: true);
}
void _batchInsertCover(Batch batch, CoverRow row) {
batch.insert(
coverTable,
row.toMap(),
conflictAlgorithm: ConflictAlgorithm.replace,
);
}
@override
Future<void> removeCovers(Set<CollectionFilter> filters) async {
if (filters.isEmpty) return;
// for backward compatibility, remove stored JSON instead of removing de/reserialized filters
final obsoleteFilterJson = <String>{};
final rows = await _db.query(coverTable);
rows.forEach((row) {
final filterJson = row['filter'] as String?;
if (filterJson != null) {
final filter = CollectionFilter.fromJson(filterJson);
if (filters.any((v) => filter == v)) {
obsoleteFilterJson.add(filterJson);
}
}
});
final batch = _db.batch();
obsoleteFilterJson.forEach((filterJson) => batch.delete(coverTable, where: 'filter = ?', whereArgs: [filterJson]));
await batch.commit(noResult: true);
}
// dynamic albums
@override
Future<int> clearDynamicAlbums() async {
final count = await _db.delete(dynamicAlbumTable, where: '1');
debugPrint('$runtimeType clearDynamicAlbums deleted $count rows');
return count;
}
  /// Loads all dynamic album rows.
  ///
  /// On a cursor failure with a buffered read, retries row by row
  /// ([bufferSize] = 1), then rewrites the table with only the rows that
  /// could be read, dropping the corrupt ones (self-healing).
  @override
  Future<Set<DynamicAlbumRow>> loadAllDynamicAlbums({int bufferSize = _queryCursorBufferSize}) async {
    final result = <DynamicAlbumRow>{};
    try {
      final cursor = await _db.queryCursor(dynamicAlbumTable, bufferSize: bufferSize);
      while (await cursor.moveNext()) {
        final rowMap = cursor.current;
        final row = DynamicAlbumRow.fromMap(rowMap);
        if (row != null) {
          result.add(row);
        } else {
          debugPrint('$runtimeType failed to deserialize dynamic album from row=$rowMap');
        }
      }
    } catch (error, stack) {
      debugPrint('$runtimeType failed to query table=$dynamicAlbumTable error=$error\n$stack');
      if (bufferSize > 1) {
        // Recursive retry with no buffering, then rebuild the table from the
        // rows that survived the row-by-row read.
        debugPrint('$runtimeType retry to query table=$dynamicAlbumTable with no cursor buffer');
        final safeRows = await loadAllDynamicAlbums(bufferSize: 1);
        final clearedCount = await clearDynamicAlbums();
        await addDynamicAlbums(safeRows);
        final addedCount = safeRows.length;
        final lostCount = clearedCount - addedCount;
        debugPrint('$runtimeType kept $addedCount rows, lost $lostCount rows from table=$dynamicAlbumTable');
        return safeRows;
      }
    }
    return result;
  }
@override
Future<void> addDynamicAlbums(Set<DynamicAlbumRow> rows) async {
if (rows.isEmpty) return;
final batch = _db.batch();
rows.forEach((row) => _batchInsertDynamicAlbum(batch, row));
await batch.commit(noResult: true);
}
void _batchInsertDynamicAlbum(Batch batch, DynamicAlbumRow row) {
batch.insert(
dynamicAlbumTable,
row.toMap(),
conflictAlgorithm: ConflictAlgorithm.replace,
);
}
@override
Future<void> removeDynamicAlbums(Set<String> names) async {
if (names.isEmpty) return;
final batch = _db.batch();
names.forEach((name) => batch.delete(dynamicAlbumTable, where: 'name = ?', whereArgs: [name]));
await batch.commit(noResult: true);
}
// video playback
@override
Future<void> clearVideoPlayback() async {
final count = await _db.delete(videoPlaybackTable, where: '1');
debugPrint('$runtimeType clearVideoPlayback deleted $count rows');
}
@override
Future<Set<VideoPlaybackRow>> loadAllVideoPlayback() async {
final result = <VideoPlaybackRow>{};
final cursor = await _db.queryCursor(videoPlaybackTable, bufferSize: _queryCursorBufferSize);
while (await cursor.moveNext()) {
final rowMap = cursor.current;
final row = VideoPlaybackRow.fromMap(rowMap);
if (row != null) {
result.add(row);
} else {
debugPrint('$runtimeType failed to deserialize video playback from row=$rowMap');
}
}
return result;
}
@override
Future<VideoPlaybackRow?> loadVideoPlayback(int id) async {
final rows = await _db.query(videoPlaybackTable, where: 'id = ?', whereArgs: [id]);
if (rows.isEmpty) return null;
return VideoPlaybackRow.fromMap(rows.first);
}
@override
Future<void> addVideoPlayback(Set<VideoPlaybackRow> rows) async {
if (rows.isEmpty) return;
final batch = _db.batch();
rows.forEach((row) => _batchInsertVideoPlayback(batch, row));
await batch.commit(noResult: true);
}
void _batchInsertVideoPlayback(Batch batch, VideoPlaybackRow row) {
batch.insert(
videoPlaybackTable,
row.toMap(),
conflictAlgorithm: ConflictAlgorithm.replace,
);
}
@override
Future<void> removeVideoPlayback(Set<int> ids) async {
if (ids.isEmpty) return;
final batch = _db.batch();
ids.forEach((id) => batch.delete(videoPlaybackTable, where: 'id = ?', whereArgs: [id]));
await batch.commit(noResult: true);
}
  // convenience methods

  /// Loads the rows of [table] whose `id` is in [ids] and maps each row
  /// through [mapRow]. Returns an empty set when [ids] is empty.
  // [ids] are trusted integers, so inlining them into the IN clause is safe.
  Future<Set<T>> _getByIds<T>(Set<int> ids, String table, T Function(Map<String, Object?> row) mapRow) async {
    final result = <T>{};
    if (ids.isNotEmpty) {
      final cursor = await _db.queryCursor(table, where: 'id IN (${ids.join(',')})', bufferSize: _queryCursorBufferSize);
      while (await cursor.moveNext()) {
        result.add(mapRow(cursor.current));
      }
    }
    return result;
  }
// --------------------------------------------------------
// MIGRAZIONE "GUARDIA" PER COLONNE REMOTE
// --------------------------------------------------------
Future<void> _ensureRemoteColumns(Database db) async {
Future<Set<String>> _columnsOf(String table) async {
final rows = await db.rawQuery('PRAGMA table_info($table)');
return rows.map((r) => (r['name'] as String).toLowerCase()).toSet();
}
final cols = await _columnsOf(entryTable);
final alters = <String>[
if (!cols.contains('origin')) "ALTER TABLE $entryTable ADD COLUMN origin INTEGER",
if (!cols.contains('trashed')) "ALTER TABLE $entryTable ADD COLUMN trashed INTEGER",
if (!cols.contains('provider')) "ALTER TABLE $entryTable ADD COLUMN provider TEXT",
if (!cols.contains('remoteid')) "ALTER TABLE $entryTable ADD COLUMN remoteId TEXT",
if (!cols.contains('remotepath')) "ALTER TABLE $entryTable ADD COLUMN remotePath TEXT",
if (!cols.contains('remotethumb1')) "ALTER TABLE $entryTable ADD COLUMN remoteThumb1 TEXT",
if (!cols.contains('remotethumb2')) "ALTER TABLE $entryTable ADD COLUMN remoteThumb2 TEXT",
if (!cols.contains('remotewidth')) "ALTER TABLE $entryTable ADD COLUMN remoteWidth INTEGER",
if (!cols.contains('remoteheight')) "ALTER TABLE $entryTable ADD COLUMN remoteHeight INTEGER",
if (!cols.contains('remoterotation')) "ALTER TABLE $entryTable ADD COLUMN remoteRotation INTEGER",
if (!cols.contains('latitude')) "ALTER TABLE $entryTable ADD COLUMN latitude REAL",
if (!cols.contains('longitude')) "ALTER TABLE $entryTable ADD COLUMN longitude REAL",
if (!cols.contains('altitude')) "ALTER TABLE $entryTable ADD COLUMN altitude REAL",
];
for (final sql in alters) {
try {
await db.execute(sql);
} catch (_) {
// idempotenza: ignora duplicate column name ecc.
}
}
// Indici utili ai remoti
try { await db.execute("CREATE INDEX IF NOT EXISTS entry_remote_idx ON $entryTable(origin, remoteId)"); } catch (_) {}
try { await db.execute("CREATE INDEX IF NOT EXISTS entry_origin_trashed_idx ON $entryTable(origin, trashed)"); } catch (_) {}
}
}