001/**
002 * Licensed to the Apache Software Foundation (ASF) under one or more
003 * contributor license agreements.  See the NOTICE file distributed with
004 * this work for additional information regarding copyright ownership.
005 * The ASF licenses this file to You under the Apache License, Version 2.0
006 * (the "License"); you may not use this file except in compliance with
007 * the License.  You may obtain a copy of the License at
008 *
009 *      http://www.apache.org/licenses/LICENSE-2.0
010 *
011 * Unless required by applicable law or agreed to in writing, software
012 * distributed under the License is distributed on an "AS IS" BASIS,
013 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
014 * See the License for the specific language governing permissions and
015 * limitations under the License.
016 */
017package org.apache.activemq.store.kahadb;
018
019import static org.apache.activemq.store.kahadb.disk.journal.Location.NOT_SET;
020
021import java.io.ByteArrayInputStream;
022import java.io.ByteArrayOutputStream;
023import java.io.DataInput;
024import java.io.DataOutput;
025import java.io.EOFException;
026import java.io.File;
027import java.io.IOException;
028import java.io.InputStream;
029import java.io.InterruptedIOException;
030import java.io.ObjectInputStream;
031import java.io.ObjectOutputStream;
032import java.io.OutputStream;
033import java.util.ArrayList;
034import java.util.Arrays;
035import java.util.Collection;
036import java.util.Collections;
037import java.util.Date;
038import java.util.HashMap;
039import java.util.HashSet;
040import java.util.Iterator;
041import java.util.LinkedHashMap;
042import java.util.LinkedHashSet;
043import java.util.LinkedList;
044import java.util.List;
045import java.util.Map;
046import java.util.Map.Entry;
047import java.util.Set;
048import java.util.SortedSet;
049import java.util.TreeMap;
050import java.util.TreeSet;
051import java.util.concurrent.ConcurrentHashMap;
052import java.util.concurrent.ConcurrentMap;
053import java.util.concurrent.Executors;
054import java.util.concurrent.ScheduledExecutorService;
055import java.util.concurrent.ThreadFactory;
056import java.util.concurrent.TimeUnit;
057import java.util.concurrent.atomic.AtomicBoolean;
058import java.util.concurrent.atomic.AtomicLong;
059import java.util.concurrent.atomic.AtomicReference;
060import java.util.concurrent.locks.ReentrantReadWriteLock;
061
062import org.apache.activemq.ActiveMQMessageAuditNoSync;
063import org.apache.activemq.broker.BrokerService;
064import org.apache.activemq.broker.BrokerServiceAware;
065import org.apache.activemq.broker.region.Destination;
066import org.apache.activemq.broker.region.Queue;
067import org.apache.activemq.broker.region.Topic;
068import org.apache.activemq.command.MessageAck;
069import org.apache.activemq.command.TransactionId;
070import org.apache.activemq.openwire.OpenWireFormat;
071import org.apache.activemq.protobuf.Buffer;
072import org.apache.activemq.store.MessageStore;
073import org.apache.activemq.store.MessageStoreStatistics;
074import org.apache.activemq.store.MessageStoreSubscriptionStatistics;
075import org.apache.activemq.store.TopicMessageStore;
076import org.apache.activemq.store.kahadb.data.KahaAckMessageFileMapCommand;
077import org.apache.activemq.store.kahadb.data.KahaAddMessageCommand;
078import org.apache.activemq.store.kahadb.data.KahaCommitCommand;
079import org.apache.activemq.store.kahadb.data.KahaDestination;
080import org.apache.activemq.store.kahadb.data.KahaEntryType;
081import org.apache.activemq.store.kahadb.data.KahaPrepareCommand;
082import org.apache.activemq.store.kahadb.data.KahaProducerAuditCommand;
083import org.apache.activemq.store.kahadb.data.KahaRemoveDestinationCommand;
084import org.apache.activemq.store.kahadb.data.KahaRemoveMessageCommand;
085import org.apache.activemq.store.kahadb.data.KahaRewrittenDataFileCommand;
086import org.apache.activemq.store.kahadb.data.KahaRollbackCommand;
087import org.apache.activemq.store.kahadb.data.KahaSubscriptionCommand;
088import org.apache.activemq.store.kahadb.data.KahaTraceCommand;
089import org.apache.activemq.store.kahadb.data.KahaTransactionInfo;
090import org.apache.activemq.store.kahadb.data.KahaUpdateMessageCommand;
091import org.apache.activemq.store.kahadb.disk.index.BTreeIndex;
092import org.apache.activemq.store.kahadb.disk.index.BTreeVisitor;
093import org.apache.activemq.store.kahadb.disk.index.ListIndex;
094import org.apache.activemq.store.kahadb.disk.journal.DataFile;
095import org.apache.activemq.store.kahadb.disk.journal.Journal;
096import org.apache.activemq.store.kahadb.disk.journal.Journal.JournalDiskSyncStrategy;
097import org.apache.activemq.store.kahadb.disk.journal.Location;
098import org.apache.activemq.store.kahadb.disk.journal.TargetedDataFileAppender;
099import org.apache.activemq.store.kahadb.disk.page.Page;
100import org.apache.activemq.store.kahadb.disk.page.PageFile;
101import org.apache.activemq.store.kahadb.disk.page.Transaction;
102import org.apache.activemq.store.kahadb.disk.util.LocationMarshaller;
103import org.apache.activemq.store.kahadb.disk.util.LongMarshaller;
104import org.apache.activemq.store.kahadb.disk.util.Marshaller;
105import org.apache.activemq.store.kahadb.disk.util.Sequence;
106import org.apache.activemq.store.kahadb.disk.util.SequenceSet;
107import org.apache.activemq.store.kahadb.disk.util.StringMarshaller;
108import org.apache.activemq.store.kahadb.disk.util.VariableMarshaller;
109import org.apache.activemq.util.ByteSequence;
110import org.apache.activemq.util.DataByteArrayInputStream;
111import org.apache.activemq.util.DataByteArrayOutputStream;
112import org.apache.activemq.util.IOExceptionSupport;
113import org.apache.activemq.util.IOHelper;
114import org.apache.activemq.util.ServiceStopper;
115import org.apache.activemq.util.ServiceSupport;
116import org.apache.activemq.util.ThreadPoolUtils;
117import org.slf4j.Logger;
118import org.slf4j.LoggerFactory;
119import org.slf4j.MDC;
120
121public abstract class MessageDatabase extends ServiceSupport implements BrokerServiceAware {
122
123    protected BrokerService brokerService;
124
125    public static final String PROPERTY_LOG_SLOW_ACCESS_TIME = "org.apache.activemq.store.kahadb.LOG_SLOW_ACCESS_TIME";
126    public static final int LOG_SLOW_ACCESS_TIME = Integer.getInteger(PROPERTY_LOG_SLOW_ACCESS_TIME, 0);
127    public static final File DEFAULT_DIRECTORY = new File("KahaDB");
128    protected static final Buffer UNMATCHED;
129    static {
130        UNMATCHED = new Buffer(new byte[]{});
131    }
132    private static final Logger LOG = LoggerFactory.getLogger(MessageDatabase.class);
133
134    static final int CLOSED_STATE = 1;
135    static final int OPEN_STATE = 2;
136    static final long NOT_ACKED = -1;
137
138    static final int VERSION = 6;
139
140    static final byte COMPACTED_JOURNAL_FILE = DataFile.STANDARD_LOG_FILE + 1;
141
142    protected class Metadata {
143        protected Page<Metadata> page;
144        protected int state;
145        protected BTreeIndex<String, StoredDestination> destinations;
146        protected Location lastUpdate;
147        protected Location firstInProgressTransactionLocation;
148        protected Location producerSequenceIdTrackerLocation = null;
149        protected Location ackMessageFileMapLocation = null;
150        protected transient ActiveMQMessageAuditNoSync producerSequenceIdTracker = new ActiveMQMessageAuditNoSync();
151        protected transient Map<Integer, Set<Integer>> ackMessageFileMap = new HashMap<>();
152        protected int version = VERSION;
153        protected int openwireVersion = OpenWireFormat.DEFAULT_STORE_VERSION;
154
155        public void read(DataInput is) throws IOException {
156            state = is.readInt();
157            destinations = new BTreeIndex<>(pageFile, is.readLong());
158            if (is.readBoolean()) {
159                lastUpdate = LocationMarshaller.INSTANCE.readPayload(is);
160            } else {
161                lastUpdate = null;
162            }
163            if (is.readBoolean()) {
164                firstInProgressTransactionLocation = LocationMarshaller.INSTANCE.readPayload(is);
165            } else {
166                firstInProgressTransactionLocation = null;
167            }
168            try {
169                if (is.readBoolean()) {
170                    producerSequenceIdTrackerLocation = LocationMarshaller.INSTANCE.readPayload(is);
171                } else {
172                    producerSequenceIdTrackerLocation = null;
173                }
174            } catch (EOFException expectedOnUpgrade) {
175            }
176            try {
177                version = is.readInt();
178            } catch (EOFException expectedOnUpgrade) {
179                version = 1;
180            }
181            if (version >= 5 && is.readBoolean()) {
182                ackMessageFileMapLocation = LocationMarshaller.INSTANCE.readPayload(is);
183            } else {
184                ackMessageFileMapLocation = null;
185            }
186            try {
187                openwireVersion = is.readInt();
188            } catch (EOFException expectedOnUpgrade) {
189                openwireVersion = OpenWireFormat.DEFAULT_LEGACY_VERSION;
190            }
191            LOG.info("KahaDB is version " + version);
192        }
193
194        public void write(DataOutput os) throws IOException {
195            os.writeInt(state);
196            os.writeLong(destinations.getPageId());
197
198            if (lastUpdate != null) {
199                os.writeBoolean(true);
200                LocationMarshaller.INSTANCE.writePayload(lastUpdate, os);
201            } else {
202                os.writeBoolean(false);
203            }
204
205            if (firstInProgressTransactionLocation != null) {
206                os.writeBoolean(true);
207                LocationMarshaller.INSTANCE.writePayload(firstInProgressTransactionLocation, os);
208            } else {
209                os.writeBoolean(false);
210            }
211
212            if (producerSequenceIdTrackerLocation != null) {
213                os.writeBoolean(true);
214                LocationMarshaller.INSTANCE.writePayload(producerSequenceIdTrackerLocation, os);
215            } else {
216                os.writeBoolean(false);
217            }
218            os.writeInt(VERSION);
219            if (ackMessageFileMapLocation != null) {
220                os.writeBoolean(true);
221                LocationMarshaller.INSTANCE.writePayload(ackMessageFileMapLocation, os);
222            } else {
223                os.writeBoolean(false);
224            }
225            os.writeInt(this.openwireVersion);
226        }
227    }
228
229    class MetadataMarshaller extends VariableMarshaller<Metadata> {
230        @Override
231        public Metadata readPayload(DataInput dataIn) throws IOException {
232            Metadata rc = createMetadata();
233            rc.read(dataIn);
234            return rc;
235        }
236
237        @Override
238        public void writePayload(Metadata object, DataOutput dataOut) throws IOException {
239            object.write(dataOut);
240        }
241    }
242
243    public enum PurgeRecoveredXATransactionStrategy {
244        NEVER,
245        COMMIT,
246        ROLLBACK;
247    }
248
249    protected PageFile pageFile;
250    protected Journal journal;
251    protected Metadata metadata = new Metadata();
252
253    protected MetadataMarshaller metadataMarshaller = new MetadataMarshaller();
254
255    protected boolean failIfDatabaseIsLocked;
256
257    protected boolean deleteAllMessages;
258    protected File directory = DEFAULT_DIRECTORY;
259    protected File indexDirectory = null;
260    protected ScheduledExecutorService scheduler;
261    private final Object schedulerLock = new Object();
262
263    protected JournalDiskSyncStrategy journalDiskSyncStrategy = JournalDiskSyncStrategy.ALWAYS;
264    protected boolean archiveDataLogs;
265    protected File directoryArchive;
266    protected AtomicLong journalSize = new AtomicLong(0);
267    long journalDiskSyncInterval = 1000;
268    long checkpointInterval = 5*1000;
269    long cleanupInterval = 30*1000;
270    int journalMaxFileLength = Journal.DEFAULT_MAX_FILE_LENGTH;
271    int journalMaxWriteBatchSize = Journal.DEFAULT_MAX_WRITE_BATCH_SIZE;
272    boolean enableIndexWriteAsync = false;
273    int setIndexWriteBatchSize = PageFile.DEFAULT_WRITE_BATCH_SIZE;
274    private String preallocationScope = Journal.PreallocationScope.ENTIRE_JOURNAL.name();
275    private String preallocationStrategy = Journal.PreallocationStrategy.SPARSE_FILE.name();
276
277    protected AtomicBoolean opened = new AtomicBoolean();
278    private boolean ignoreMissingJournalfiles = false;
279    private int indexCacheSize = 10000;
280    private boolean checkForCorruptJournalFiles = false;
281    protected PurgeRecoveredXATransactionStrategy purgeRecoveredXATransactionStrategy = PurgeRecoveredXATransactionStrategy.NEVER;
282    private boolean checksumJournalFiles = true;
283    protected boolean forceRecoverIndex = false;
284    private boolean archiveCorruptedIndex = false;
285    private boolean useIndexLFRUEviction = false;
286    private float indexLFUEvictionFactor = 0.2f;
287    private boolean enableIndexDiskSyncs = true;
288    private boolean enableIndexRecoveryFile = true;
289    private boolean enableIndexPageCaching = true;
290    ReentrantReadWriteLock checkpointLock = new ReentrantReadWriteLock();
291
292    private boolean enableAckCompaction = true;
293    private int compactAcksAfterNoGC = 10;
294    private boolean compactAcksIgnoresStoreGrowth = false;
295    private int checkPointCyclesWithNoGC;
296    private int journalLogOnLastCompactionCheck;
297    private boolean enableSubscriptionStatistics = false;
298
299    //only set when using JournalDiskSyncStrategy.PERIODIC
300    protected final AtomicReference<Location> lastAsyncJournalUpdate = new AtomicReference<>();
301
302    @Override
303    public void doStart() throws Exception {
304        load();
305    }
306
307    @Override
308    public void doStop(ServiceStopper stopper) throws Exception {
309        unload();
310    }
311
312    public void allowIOResumption() {
313        if (pageFile != null) {
314            pageFile.allowIOResumption();
315        }
316        if (journal != null) {
317            journal.allowIOResumption();
318        }
319    }
320
321    private void loadPageFile() throws IOException {
322        this.indexLock.writeLock().lock();
323        try {
324            final PageFile pageFile = getPageFile();
325            pageFile.load();
326            pageFile.tx().execute(new Transaction.Closure<IOException>() {
327                @Override
328                public void execute(Transaction tx) throws IOException {
329                    if (pageFile.getPageCount() == 0) {
330                        // First time this is created.. Initialize the metadata
331                        Page<Metadata> page = tx.allocate();
332                        assert page.getPageId() == 0;
333                        page.set(metadata);
334                        metadata.page = page;
335                        metadata.state = CLOSED_STATE;
336                        metadata.destinations = new BTreeIndex<>(pageFile, tx.allocate().getPageId());
337
338                        tx.store(metadata.page, metadataMarshaller, true);
339                    } else {
340                        Page<Metadata> page = tx.load(0, metadataMarshaller);
341                        metadata = page.get();
342                        metadata.page = page;
343                    }
344                    metadata.destinations.setKeyMarshaller(StringMarshaller.INSTANCE);
345                    metadata.destinations.setValueMarshaller(new StoredDestinationMarshaller());
346                    metadata.destinations.load(tx);
347                }
348            });
349            // Load up all the destinations since we need to scan all the indexes to figure out which journal files can be deleted.
350            // Perhaps we should just keep an index of file
351            storedDestinations.clear();
352            pageFile.tx().execute(new Transaction.Closure<IOException>() {
353                @Override
354                public void execute(Transaction tx) throws IOException {
355                    for (Iterator<Entry<String, StoredDestination>> iterator = metadata.destinations.iterator(tx); iterator.hasNext();) {
356                        Entry<String, StoredDestination> entry = iterator.next();
357                        StoredDestination sd = loadStoredDestination(tx, entry.getKey(), entry.getValue().subscriptions!=null);
358                        storedDestinations.put(entry.getKey(), sd);
359
360                        if (checkForCorruptJournalFiles) {
361                            // sanity check the index also
362                            if (!entry.getValue().locationIndex.isEmpty(tx)) {
363                                if (entry.getValue().orderIndex.nextMessageId <= 0) {
364                                    throw new IOException("Detected uninitialized orderIndex nextMessageId with pending messages for " + entry.getKey());
365                                }
366                            }
367                        }
368                    }
369                }
370            });
371            pageFile.flush();
372        } finally {
373            this.indexLock.writeLock().unlock();
374        }
375    }
376
377    private void startCheckpoint() {
378        if (checkpointInterval == 0 && cleanupInterval == 0) {
379            LOG.info("periodic checkpoint/cleanup disabled, will ocurr on clean shutdown/restart");
380            return;
381        }
382        synchronized (schedulerLock) {
383            if (scheduler == null || scheduler.isShutdown()) {
384                scheduler = Executors.newSingleThreadScheduledExecutor(new ThreadFactory() {
385
386                    @Override
387                    public Thread newThread(Runnable r) {
388                        Thread schedulerThread = new Thread(r);
389
390                        schedulerThread.setName("ActiveMQ Journal Checkpoint Worker");
391                        schedulerThread.setDaemon(true);
392
393                        return schedulerThread;
394                    }
395                });
396
397                // Short intervals for check-point and cleanups
398                long delay;
399                if (journal.isJournalDiskSyncPeriodic()) {
400                    delay = Math.min(journalDiskSyncInterval > 0 ? journalDiskSyncInterval : checkpointInterval, 500);
401                } else {
402                    delay = Math.min(checkpointInterval > 0 ? checkpointInterval : cleanupInterval, 500);
403                }
404
405                scheduler.scheduleWithFixedDelay(new CheckpointRunner(), 0, delay, TimeUnit.MILLISECONDS);
406            }
407        }
408    }
409
410    private final class CheckpointRunner implements Runnable {
411
412        private long lastCheckpoint = System.currentTimeMillis();
413        private long lastCleanup = System.currentTimeMillis();
414        private long lastSync = System.currentTimeMillis();
415        private Location lastAsyncUpdate = null;
416
417        @Override
418        public void run() {
419            try {
420                // Decide on cleanup vs full checkpoint here.
421                if (opened.get()) {
422                    long now = System.currentTimeMillis();
423                    if (journal.isJournalDiskSyncPeriodic() &&
424                            journalDiskSyncInterval > 0 && (now - lastSync >= journalDiskSyncInterval)) {
425                        Location currentUpdate = lastAsyncJournalUpdate.get();
426                        if (currentUpdate != null && !currentUpdate.equals(lastAsyncUpdate)) {
427                            lastAsyncUpdate = currentUpdate;
428                            if (LOG.isTraceEnabled()) {
429                                LOG.trace("Writing trace command to trigger journal sync");
430                            }
431                            store(new KahaTraceCommand(), true, null, null);
432                        }
433                        lastSync = now;
434                    }
435                    if (cleanupInterval > 0 && (now - lastCleanup >= cleanupInterval)) {
436                        checkpointCleanup(true);
437                        lastCleanup = now;
438                        lastCheckpoint = now;
439                    } else if (checkpointInterval > 0 && (now - lastCheckpoint >= checkpointInterval)) {
440                        checkpointCleanup(false);
441                        lastCheckpoint = now;
442                    }
443                }
444            } catch (IOException ioe) {
445                LOG.error("Checkpoint failed", ioe);
446                brokerService.handleIOException(ioe);
447            } catch (Throwable e) {
448                LOG.error("Checkpoint failed", e);
449                brokerService.handleIOException(IOExceptionSupport.create(e));
450            }
451        }
452    }
453
454    public void open() throws IOException {
455        if( opened.compareAndSet(false, true) ) {
456            getJournal().start();
457            try {
458                loadPageFile();
459            } catch (Throwable t) {
460                LOG.warn("Index corrupted. Recovering the index through journal replay. Cause:" + t);
461                if (LOG.isDebugEnabled()) {
462                    LOG.debug("Index load failure", t);
463                }
464                // try to recover index
465                try {
466                    pageFile.unload();
467                } catch (Exception ignore) {}
468                if (archiveCorruptedIndex) {
469                    pageFile.archive();
470                } else {
471                    pageFile.delete();
472                }
473                metadata = createMetadata();
474                //The metadata was recreated after a detect corruption so we need to
475                //reconfigure anything that was configured on the old metadata on startup
476                configureMetadata();
477                pageFile = null;
478                loadPageFile();
479            }
480            recover();
481            startCheckpoint();
482        }
483    }
484
485    public void load() throws IOException {
486        this.indexLock.writeLock().lock();
487        try {
488            IOHelper.mkdirs(directory);
489            if (deleteAllMessages) {
490                getJournal().setCheckForCorruptionOnStartup(false);
491                getJournal().start();
492                getJournal().delete();
493                getJournal().close();
494                journal = null;
495                getPageFile().delete();
496                LOG.info("Persistence store purged.");
497                deleteAllMessages = false;
498            }
499
500            open();
501            store(new KahaTraceCommand().setMessage("LOADED " + new Date()));
502        } finally {
503            this.indexLock.writeLock().unlock();
504        }
505    }
506
507    public void close() throws IOException, InterruptedException {
508        if (opened.compareAndSet(true, false)) {
509            checkpointLock.writeLock().lock();
510            try {
511                if (metadata.page != null) {
512                    checkpointUpdate(true);
513                }
514                pageFile.unload();
515                metadata = createMetadata();
516            } finally {
517                checkpointLock.writeLock().unlock();
518            }
519            journal.close();
520            synchronized(schedulerLock) {
521                if (scheduler != null) {
522                    ThreadPoolUtils.shutdownGraceful(scheduler, -1);
523                    scheduler = null;
524                }
525            }
526            // clear the cache and journalSize on shutdown of the store
527            storeCache.clear();
528            journalSize.set(0);
529        }
530    }
531
532    public void unload() throws IOException, InterruptedException {
533        this.indexLock.writeLock().lock();
534        try {
535            if( pageFile != null && pageFile.isLoaded() ) {
536                metadata.state = CLOSED_STATE;
537                metadata.firstInProgressTransactionLocation = getInProgressTxLocationRange()[0];
538
539                if (metadata.page != null) {
540                    pageFile.tx().execute(new Transaction.Closure<IOException>() {
541                        @Override
542                        public void execute(Transaction tx) throws IOException {
543                            tx.store(metadata.page, metadataMarshaller, true);
544                        }
545                    });
546                }
547            }
548        } finally {
549            this.indexLock.writeLock().unlock();
550        }
551        close();
552    }
553
554    // public for testing
555    @SuppressWarnings("rawtypes")
556    public Location[] getInProgressTxLocationRange() {
557        Location[] range = new Location[]{null, null};
558        synchronized (inflightTransactions) {
559            if (!inflightTransactions.isEmpty()) {
560                for (List<Operation> ops : inflightTransactions.values()) {
561                    if (!ops.isEmpty()) {
562                        trackMaxAndMin(range, ops);
563                    }
564                }
565            }
566            if (!preparedTransactions.isEmpty()) {
567                for (List<Operation> ops : preparedTransactions.values()) {
568                    if (!ops.isEmpty()) {
569                        trackMaxAndMin(range, ops);
570                    }
571                }
572            }
573        }
574        return range;
575    }
576
577    @SuppressWarnings("rawtypes")
578    private void trackMaxAndMin(Location[] range, List<Operation> ops) {
579        Location t = ops.get(0).getLocation();
580        if (range[0] == null || t.compareTo(range[0]) <= 0) {
581            range[0] = t;
582        }
583        t = ops.get(ops.size() -1).getLocation();
584        if (range[1] == null || t.compareTo(range[1]) >= 0) {
585            range[1] = t;
586        }
587    }
588
589    class TranInfo {
590        TransactionId id;
591        Location location;
592
593        class opCount {
594            int add;
595            int remove;
596        }
597        HashMap<KahaDestination, opCount> destinationOpCount = new HashMap<>();
598
599        @SuppressWarnings("rawtypes")
600        public void track(Operation operation) {
601            if (location == null ) {
602                location = operation.getLocation();
603            }
604            KahaDestination destination;
605            boolean isAdd = false;
606            if (operation instanceof AddOperation) {
607                AddOperation add = (AddOperation) operation;
608                destination = add.getCommand().getDestination();
609                isAdd = true;
610            } else {
611                RemoveOperation removeOpperation = (RemoveOperation) operation;
612                destination = removeOpperation.getCommand().getDestination();
613            }
614            opCount opCount = destinationOpCount.get(destination);
615            if (opCount == null) {
616                opCount = new opCount();
617                destinationOpCount.put(destination, opCount);
618            }
619            if (isAdd) {
620                opCount.add++;
621            } else {
622                opCount.remove++;
623            }
624        }
625
626        @Override
627        public String toString() {
628           StringBuffer buffer = new StringBuffer();
629           buffer.append(location).append(";").append(id).append(";\n");
630           for (Entry<KahaDestination, opCount> op : destinationOpCount.entrySet()) {
631               buffer.append(op.getKey()).append('+').append(op.getValue().add).append(',').append('-').append(op.getValue().remove).append(';');
632           }
633           return buffer.toString();
634        }
635    }
636
637    @SuppressWarnings("rawtypes")
638    public String getTransactions() {
639
640        ArrayList<TranInfo> infos = new ArrayList<>();
641        synchronized (inflightTransactions) {
642            if (!inflightTransactions.isEmpty()) {
643                for (Entry<TransactionId, List<Operation>> entry : inflightTransactions.entrySet()) {
644                    TranInfo info = new TranInfo();
645                    info.id = entry.getKey();
646                    for (Operation operation : entry.getValue()) {
647                        info.track(operation);
648                    }
649                    infos.add(info);
650                }
651            }
652        }
653        synchronized (preparedTransactions) {
654            if (!preparedTransactions.isEmpty()) {
655                for (Entry<TransactionId, List<Operation>> entry : preparedTransactions.entrySet()) {
656                    TranInfo info = new TranInfo();
657                    info.id = entry.getKey();
658                    for (Operation operation : entry.getValue()) {
659                        info.track(operation);
660                    }
661                    infos.add(info);
662                }
663            }
664        }
665        return infos.toString();
666    }
667
668    /**
669     * Move all the messages that were in the journal into long term storage. We
670     * just replay and do a checkpoint.
671     *
672     * @throws IOException
673     * @throws IOException
674     * @throws IllegalStateException
675     */
676    private void recover() throws IllegalStateException, IOException {
677        this.indexLock.writeLock().lock();
678        try {
679
680            long start = System.currentTimeMillis();
681            boolean requiresJournalReplay = recoverProducerAudit();
682            requiresJournalReplay |= recoverAckMessageFileMap();
683            Location lastIndoubtPosition = getRecoveryPosition();
684            Location recoveryPosition = requiresJournalReplay ? journal.getNextLocation(null) : lastIndoubtPosition;
685            if (recoveryPosition != null) {
686                int redoCounter = 0;
687                int dataFileRotationTracker = recoveryPosition.getDataFileId();
688                LOG.info("Recovering from the journal @" + recoveryPosition);
689                while (recoveryPosition != null) {
690                    try {
691                        JournalCommand<?> message = load(recoveryPosition);
692                        metadata.lastUpdate = recoveryPosition;
693                        process(message, recoveryPosition, lastIndoubtPosition);
694                        redoCounter++;
695                    } catch (IOException failedRecovery) {
696                        if (isIgnoreMissingJournalfiles()) {
697                            LOG.debug("Failed to recover data at position:" + recoveryPosition, failedRecovery);
698                            // track this dud location
699                            journal.corruptRecoveryLocation(recoveryPosition);
700                        } else {
701                            throw new IOException("Failed to recover data at position:" + recoveryPosition, failedRecovery);
702                        }
703                    }
704                    recoveryPosition = journal.getNextLocation(recoveryPosition);
705                    // hold on to the minimum number of open files during recovery
706                    if (recoveryPosition != null && dataFileRotationTracker != recoveryPosition.getDataFileId()) {
707                        dataFileRotationTracker = recoveryPosition.getDataFileId();
708                        journal.cleanup();
709                    }
710                    if (LOG.isInfoEnabled() && redoCounter % 100000 == 0) {
711                        LOG.info("@" + recoveryPosition + ", " + redoCounter + " entries recovered ..");
712                    }
713                }
714                if (LOG.isInfoEnabled()) {
715                    long end = System.currentTimeMillis();
716                    LOG.info("Recovery replayed " + redoCounter + " operations from the journal in " + ((end - start) / 1000.0f) + " seconds.");
717                }
718            }
719
720            // We may have to undo some index updates.
721            pageFile.tx().execute(new Transaction.Closure<IOException>() {
722                @Override
723                public void execute(Transaction tx) throws IOException {
724                    recoverIndex(tx);
725                }
726            });
727
728            // rollback any recovered inflight local transactions, and discard any inflight XA transactions.
729            Set<TransactionId> toRollback = new HashSet<>();
730            Set<TransactionId> toDiscard = new HashSet<>();
731            synchronized (inflightTransactions) {
732                for (Iterator<TransactionId> it = inflightTransactions.keySet().iterator(); it.hasNext(); ) {
733                    TransactionId id = it.next();
734                    if (id.isLocalTransaction()) {
735                        toRollback.add(id);
736                    } else {
737                        toDiscard.add(id);
738                    }
739                }
740                for (TransactionId tx: toRollback) {
741                    if (LOG.isDebugEnabled()) {
742                        LOG.debug("rolling back recovered indoubt local transaction " + tx);
743                    }
744                    store(new KahaRollbackCommand().setTransactionInfo(TransactionIdConversion.convertToLocal(tx)), false, null, null);
745                }
746                for (TransactionId tx: toDiscard) {
747                    if (LOG.isDebugEnabled()) {
748                        LOG.debug("discarding recovered in-flight XA transaction " + tx);
749                    }
750                    inflightTransactions.remove(tx);
751                }
752            }
753
754            synchronized (preparedTransactions) {
755                Set<TransactionId> txIds = new LinkedHashSet<TransactionId>(preparedTransactions.keySet());
756                for (TransactionId txId : txIds) {
757                    switch (purgeRecoveredXATransactionStrategy){
758                        case NEVER:
759                            LOG.warn("Recovered prepared XA TX: [{}]", txId);
760                            break;
761                        case COMMIT:
762                            store(new KahaCommitCommand().setTransactionInfo(TransactionIdConversion.convert(txId)), false, null, null);
763                            LOG.warn("Recovered and Committing prepared XA TX: [{}]", txId);
764                            break;
765                        case ROLLBACK:
766                            store(new KahaRollbackCommand().setTransactionInfo(TransactionIdConversion.convert(txId)), false, null, null);
767                            LOG.warn("Recovered and Rolling Back prepared XA TX: [{}]", txId);
768                            break;
769                    }
770                }
771            }
772
773        } finally {
774            this.indexLock.writeLock().unlock();
775        }
776    }
777
778    @SuppressWarnings("unused")
779    private KahaTransactionInfo createLocalTransactionInfo(TransactionId tx) {
780        return TransactionIdConversion.convertToLocal(tx);
781    }
782
783    private Location minimum(Location x,
784                             Location y) {
785        Location min = null;
786        if (x != null) {
787            min = x;
788            if (y != null) {
789                int compare = y.compareTo(x);
790                if (compare < 0) {
791                    min = y;
792                }
793            }
794        } else {
795            min = y;
796        }
797        return min;
798    }
799
800    private boolean recoverProducerAudit() throws IOException {
801        boolean requiresReplay = true;
802        if (metadata.producerSequenceIdTrackerLocation != null) {
803            try {
804                KahaProducerAuditCommand audit = (KahaProducerAuditCommand) load(metadata.producerSequenceIdTrackerLocation);
805                ObjectInputStream objectIn = new ObjectInputStream(audit.getAudit().newInput());
806                int maxNumProducers = getMaxFailoverProducersToTrack();
807                int maxAuditDepth = getFailoverProducersAuditDepth();
808                metadata.producerSequenceIdTracker = (ActiveMQMessageAuditNoSync) objectIn.readObject();
809                metadata.producerSequenceIdTracker.setAuditDepth(maxAuditDepth);
810                metadata.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(maxNumProducers);
811                requiresReplay = false;
812            } catch (Exception e) {
813                LOG.warn("Cannot recover message audit", e);
814            }
815        }
816        // got no audit stored so got to recreate via replay from start of the journal
817        return requiresReplay;
818    }
819
820    @SuppressWarnings("unchecked")
821    private boolean recoverAckMessageFileMap() throws IOException {
822        boolean requiresReplay = true;
823        if (metadata.ackMessageFileMapLocation != null) {
824            try {
825                KahaAckMessageFileMapCommand audit = (KahaAckMessageFileMapCommand) load(metadata.ackMessageFileMapLocation);
826                ObjectInputStream objectIn = new ObjectInputStream(audit.getAckMessageFileMap().newInput());
827                metadata.ackMessageFileMap = (Map<Integer, Set<Integer>>) objectIn.readObject();
828                requiresReplay = false;
829            } catch (Exception e) {
830                LOG.warn("Cannot recover ackMessageFileMap", e);
831            }
832        }
833        // got no ackMessageFileMap stored so got to recreate via replay from start of the journal
834        return requiresReplay;
835    }
836
837    protected void recoverIndex(Transaction tx) throws IOException {
838        long start = System.currentTimeMillis();
839        // It is possible index updates got applied before the journal updates..
840        // in that case we need to removed references to messages that are not in the journal
841        final Location lastAppendLocation = journal.getLastAppendLocation();
842        long undoCounter=0;
843
844        // Go through all the destinations to see if they have messages past the lastAppendLocation
845        for (String key : storedDestinations.keySet()) {
846            StoredDestination sd = storedDestinations.get(key);
847
848            final ArrayList<Long> matches = new ArrayList<>();
849            // Find all the Locations that are >= than the last Append Location.
850            sd.locationIndex.visit(tx, new BTreeVisitor.GTEVisitor<Location, Long>(lastAppendLocation) {
851                @Override
852                protected void matched(Location key, Long value) {
853                    matches.add(value);
854                }
855            });
856
857            for (Long sequenceId : matches) {
858                MessageKeys keys = sd.orderIndex.remove(tx, sequenceId);
859                if (keys != null) {
860                    sd.locationIndex.remove(tx, keys.location);
861                    sd.messageIdIndex.remove(tx, keys.messageId);
862                    metadata.producerSequenceIdTracker.rollback(keys.messageId);
863                    undoCounter++;
864                    decrementAndSubSizeToStoreStat(key, keys.location.getSize());
865                    // TODO: do we need to modify the ack positions for the pub sub case?
866                }
867            }
868        }
869
870        if (undoCounter > 0) {
871            // The rolledback operations are basically in flight journal writes.  To avoid getting
872            // these the end user should do sync writes to the journal.
873            if (LOG.isInfoEnabled()) {
874                long end = System.currentTimeMillis();
875                LOG.info("Rolled back " + undoCounter + " messages from the index in " + ((end - start) / 1000.0f) + " seconds.");
876            }
877        }
878
879        undoCounter = 0;
880        start = System.currentTimeMillis();
881
882        // Lets be extra paranoid here and verify that all the datafiles being referenced
883        // by the indexes still exists.
884
885        final SequenceSet ss = new SequenceSet();
886        for (StoredDestination sd : storedDestinations.values()) {
887            // Use a visitor to cut down the number of pages that we load
888            sd.locationIndex.visit(tx, new BTreeVisitor<Location, Long>() {
889                int last=-1;
890
891                @Override
892                public boolean isInterestedInKeysBetween(Location first, Location second) {
893                    if( first==null ) {
894                        return !ss.contains(0, second.getDataFileId());
895                    } else if( second==null ) {
896                        return true;
897                    } else {
898                        return !ss.contains(first.getDataFileId(), second.getDataFileId());
899                    }
900                }
901
902                @Override
903                public void visit(List<Location> keys, List<Long> values) {
904                    for (Location l : keys) {
905                        int fileId = l.getDataFileId();
906                        if( last != fileId ) {
907                            ss.add(fileId);
908                            last = fileId;
909                        }
910                    }
911                }
912
913            });
914        }
915        HashSet<Integer> missingJournalFiles = new HashSet<>();
916        while (!ss.isEmpty()) {
917            missingJournalFiles.add((int) ss.removeFirst());
918        }
919
920        for (Entry<Integer, Set<Integer>> entry : metadata.ackMessageFileMap.entrySet()) {
921            missingJournalFiles.add(entry.getKey());
922            for (Integer i : entry.getValue()) {
923                missingJournalFiles.add(i);
924            }
925        }
926
927        missingJournalFiles.removeAll(journal.getFileMap().keySet());
928
929        if (!missingJournalFiles.isEmpty()) {
930            LOG.warn("Some journal files are missing: " + missingJournalFiles);
931        }
932
933        ArrayList<BTreeVisitor.Predicate<Location>> knownCorruption = new ArrayList<>();
934        ArrayList<BTreeVisitor.Predicate<Location>> missingPredicates = new ArrayList<>();
935        for (Integer missing : missingJournalFiles) {
936            missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(missing, 0), new Location(missing + 1, 0)));
937        }
938
939        if (checkForCorruptJournalFiles) {
940            Collection<DataFile> dataFiles = journal.getFileMap().values();
941            for (DataFile dataFile : dataFiles) {
942                int id = dataFile.getDataFileId();
943                // eof to next file id
944                missingPredicates.add(new BTreeVisitor.BetweenVisitor<Location, Long>(new Location(id, dataFile.getLength()), new Location(id + 1, 0)));
945                Sequence seq = dataFile.getCorruptedBlocks().getHead();
946                while (seq != null) {
947                    BTreeVisitor.BetweenVisitor<Location, Long> visitor =
948                        new BTreeVisitor.BetweenVisitor<>(new Location(id, (int) seq.getFirst()), new Location(id, (int) seq.getLast() + 1));
949                    missingPredicates.add(visitor);
950                    knownCorruption.add(visitor);
951                    seq = seq.getNext();
952                }
953            }
954        }
955
956        if (!missingPredicates.isEmpty()) {
957            for (Entry<String, StoredDestination> sdEntry : storedDestinations.entrySet()) {
958                final StoredDestination sd = sdEntry.getValue();
959                final LinkedHashMap<Long, Location> matches = new LinkedHashMap<>();
960                sd.locationIndex.visit(tx, new BTreeVisitor.OrVisitor<Location, Long>(missingPredicates) {
961                    @Override
962                    protected void matched(Location key, Long value) {
963                        matches.put(value, key);
964                    }
965                });
966
967                // If some message references are affected by the missing data files...
968                if (!matches.isEmpty()) {
969
970                    // We either 'gracefully' recover dropping the missing messages or
971                    // we error out.
972                    if( ignoreMissingJournalfiles ) {
973                        // Update the index to remove the references to the missing data
974                        for (Long sequenceId : matches.keySet()) {
975                            MessageKeys keys = sd.orderIndex.remove(tx, sequenceId);
976                            sd.locationIndex.remove(tx, keys.location);
977                            sd.messageIdIndex.remove(tx, keys.messageId);
978                            LOG.info("[" + sdEntry.getKey() + "] dropped: " + keys.messageId + " at corrupt location: " + keys.location);
979                            undoCounter++;
980                            decrementAndSubSizeToStoreStat(sdEntry.getKey(), keys.location.getSize());
981                            // TODO: do we need to modify the ack positions for the pub sub case?
982                        }
983                    } else {
984                        LOG.error("[" + sdEntry.getKey() + "] references corrupt locations: " + matches);
985                        throw new IOException("Detected missing/corrupt journal files referenced by:[" + sdEntry.getKey() + "] " +matches.size()+" messages affected.");
986                    }
987                }
988            }
989        }
990
991        if (!ignoreMissingJournalfiles) {
992            if (!knownCorruption.isEmpty()) {
993                LOG.error("Detected corrupt journal files. " + knownCorruption);
994                throw new IOException("Detected corrupt journal files. " + knownCorruption);
995            }
996
997            if (!missingJournalFiles.isEmpty()) {
998                LOG.error("Detected missing journal files. " + missingJournalFiles);
999                throw new IOException("Detected missing journal files. " + missingJournalFiles);
1000            }
1001        }
1002
1003        if (undoCounter > 0) {
1004            // The rolledback operations are basically in flight journal writes.  To avoid getting these the end user
1005            // should do sync writes to the journal.
1006            if (LOG.isInfoEnabled()) {
1007                long end = System.currentTimeMillis();
1008                LOG.info("Detected missing/corrupt journal files.  Dropped " + undoCounter + " messages from the index in " + ((end - start) / 1000.0f) + " seconds.");
1009            }
1010        }
1011    }
1012
1013    private Location nextRecoveryPosition;
1014    private Location lastRecoveryPosition;
1015
1016    public void incrementalRecover() throws IOException {
1017        this.indexLock.writeLock().lock();
1018        try {
1019            if( nextRecoveryPosition == null ) {
1020                if( lastRecoveryPosition==null ) {
1021                    nextRecoveryPosition = getRecoveryPosition();
1022                } else {
1023                    nextRecoveryPosition = journal.getNextLocation(lastRecoveryPosition);
1024                }
1025            }
1026            while (nextRecoveryPosition != null) {
1027                lastRecoveryPosition = nextRecoveryPosition;
1028                metadata.lastUpdate = lastRecoveryPosition;
1029                JournalCommand<?> message = load(lastRecoveryPosition);
1030                process(message, lastRecoveryPosition, (IndexAware) null);
1031                nextRecoveryPosition = journal.getNextLocation(lastRecoveryPosition);
1032            }
1033        } finally {
1034            this.indexLock.writeLock().unlock();
1035        }
1036    }
1037
1038    public Location getLastUpdatePosition() throws IOException {
1039        return metadata.lastUpdate;
1040    }
1041
1042    private Location getRecoveryPosition() throws IOException {
1043
1044        if (!this.forceRecoverIndex) {
1045
1046            // If we need to recover the transactions..
1047            if (metadata.firstInProgressTransactionLocation != null) {
1048                return metadata.firstInProgressTransactionLocation;
1049            }
1050
1051            // Perhaps there were no transactions...
1052            if( metadata.lastUpdate!=null) {
1053                // Start replay at the record after the last one recorded in the index file.
1054                return getNextInitializedLocation(metadata.lastUpdate);
1055            }
1056        }
1057        // This loads the first position.
1058        return journal.getNextLocation(null);
1059    }
1060
1061    private Location getNextInitializedLocation(Location location) throws IOException {
1062        Location mayNotBeInitialized = journal.getNextLocation(location);
1063        if (location.getSize() == NOT_SET && mayNotBeInitialized != null && mayNotBeInitialized.getSize() != NOT_SET) {
1064            // need to init size and type to skip
1065            return journal.getNextLocation(mayNotBeInitialized);
1066        } else {
1067            return mayNotBeInitialized;
1068        }
1069    }
1070
1071    protected void checkpointCleanup(final boolean cleanup) throws IOException {
1072        long start;
1073        this.indexLock.writeLock().lock();
1074        try {
1075            start = System.currentTimeMillis();
1076            if( !opened.get() ) {
1077                return;
1078            }
1079        } finally {
1080            this.indexLock.writeLock().unlock();
1081        }
1082        checkpointUpdate(cleanup);
1083        long end = System.currentTimeMillis();
1084        if (LOG_SLOW_ACCESS_TIME > 0 && end - start > LOG_SLOW_ACCESS_TIME) {
1085            if (LOG.isInfoEnabled()) {
1086                LOG.info("Slow KahaDB access: cleanup took " + (end - start));
1087            }
1088        }
1089    }
1090
1091    public ByteSequence toByteSequence(JournalCommand<?> data) throws IOException {
1092        int size = data.serializedSizeFramed();
1093        DataByteArrayOutputStream os = new DataByteArrayOutputStream(size + 1);
1094        os.writeByte(data.type().getNumber());
1095        data.writeFramed(os);
1096        return os.toByteSequence();
1097    }
1098
1099    // /////////////////////////////////////////////////////////////////
1100    // Methods call by the broker to update and query the store.
1101    // /////////////////////////////////////////////////////////////////
1102    public Location store(JournalCommand<?> data) throws IOException {
1103        return store(data, false, null,null);
1104    }
1105
1106    public Location store(JournalCommand<?> data, Runnable onJournalStoreComplete) throws IOException {
1107        return store(data, false, null, null, onJournalStoreComplete);
1108    }
1109
1110    public Location store(JournalCommand<?> data, boolean sync, IndexAware before,Runnable after) throws IOException {
1111        return store(data, sync, before, after, null);
1112    }
1113
1114    /**
1115     * All updated are are funneled through this method. The updates are converted
1116     * to a JournalMessage which is logged to the journal and then the data from
1117     * the JournalMessage is used to update the index just like it would be done
1118     * during a recovery process.
1119     */
1120    public Location store(JournalCommand<?> data, boolean sync, IndexAware before, Runnable after, Runnable onJournalStoreComplete) throws IOException {
1121        try {
1122            ByteSequence sequence = toByteSequence(data);
1123            Location location;
1124
1125            checkpointLock.readLock().lock();
1126            try {
1127
1128                long start = System.currentTimeMillis();
1129                location = onJournalStoreComplete == null ? journal.write(sequence, sync) : journal.write(sequence, onJournalStoreComplete) ;
1130                long start2 = System.currentTimeMillis();
1131                //Track the last async update so we know if we need to sync at the next checkpoint
1132                if (!sync && journal.isJournalDiskSyncPeriodic()) {
1133                    lastAsyncJournalUpdate.set(location);
1134                }
1135                process(data, location, before);
1136
1137                long end = System.currentTimeMillis();
1138                if (LOG_SLOW_ACCESS_TIME > 0 && end - start > LOG_SLOW_ACCESS_TIME) {
1139                    if (LOG.isInfoEnabled()) {
1140                        LOG.info("Slow KahaDB access: Journal append took: "+(start2-start)+" ms, Index update took "+(end-start2)+" ms");
1141                    }
1142                }
1143            } finally {
1144                checkpointLock.readLock().unlock();
1145            }
1146
1147            if (after != null) {
1148                after.run();
1149            }
1150
1151            if (scheduler == null && opened.get()) {
1152                startCheckpoint();
1153            }
1154            return location;
1155        } catch (IOException ioe) {
1156            LOG.error("KahaDB failed to store to Journal, command of type: " + data.type(), ioe);
1157            brokerService.handleIOException(ioe);
1158            throw ioe;
1159        }
1160    }
1161
1162    /**
1163     * Loads a previously stored JournalMessage
1164     *
1165     * @param location
1166     * @return
1167     * @throws IOException
1168     */
1169    public JournalCommand<?> load(Location location) throws IOException {
1170        long start = System.currentTimeMillis();
1171        ByteSequence data = journal.read(location);
1172        long end = System.currentTimeMillis();
1173        if( LOG_SLOW_ACCESS_TIME>0 && end-start > LOG_SLOW_ACCESS_TIME) {
1174            if (LOG.isInfoEnabled()) {
1175                LOG.info("Slow KahaDB access: Journal read took: "+(end-start)+" ms");
1176            }
1177        }
1178        DataByteArrayInputStream is = new DataByteArrayInputStream(data);
1179        byte readByte = is.readByte();
1180        KahaEntryType type = KahaEntryType.valueOf(readByte);
1181        if( type == null ) {
1182            try {
1183                is.close();
1184            } catch (IOException e) {}
1185            throw new IOException("Could not load journal record, null type information from: " + readByte + " at location: "+location);
1186        }
1187        JournalCommand<?> message = (JournalCommand<?>)type.createMessage();
1188        message.mergeFramed(is);
1189        return message;
1190    }
1191
1192    /**
1193     * do minimal recovery till we reach the last inDoubtLocation
1194     * @param data
1195     * @param location
1196     * @param inDoubtlocation
1197     * @throws IOException
1198     */
1199    void process(JournalCommand<?> data, final Location location, final Location inDoubtlocation) throws IOException {
1200        if (inDoubtlocation != null && location.compareTo(inDoubtlocation) >= 0) {
1201            process(data, location, (IndexAware) null);
1202        } else {
1203            // just recover producer audit
1204            data.visit(new Visitor() {
1205                @Override
1206                public void visit(KahaAddMessageCommand command) throws IOException {
1207                    metadata.producerSequenceIdTracker.isDuplicate(command.getMessageId());
1208                }
1209            });
1210        }
1211    }
1212
1213    // /////////////////////////////////////////////////////////////////
1214    // Journaled record processing methods. Once the record is journaled,
1215    // these methods handle applying the index updates. These may be called
1216    // from the recovery method too so they need to be idempotent
1217    // /////////////////////////////////////////////////////////////////
1218
1219    void process(JournalCommand<?> data, final Location location, final IndexAware onSequenceAssignedCallback) throws IOException {
1220        data.visit(new Visitor() {
1221            @Override
1222            public void visit(KahaAddMessageCommand command) throws IOException {
1223                process(command, location, onSequenceAssignedCallback);
1224            }
1225
1226            @Override
1227            public void visit(KahaRemoveMessageCommand command) throws IOException {
1228                process(command, location);
1229            }
1230
1231            @Override
1232            public void visit(KahaPrepareCommand command) throws IOException {
1233                process(command, location);
1234            }
1235
1236            @Override
1237            public void visit(KahaCommitCommand command) throws IOException {
1238                process(command, location, onSequenceAssignedCallback);
1239            }
1240
1241            @Override
1242            public void visit(KahaRollbackCommand command) throws IOException {
1243                process(command, location);
1244            }
1245
1246            @Override
1247            public void visit(KahaRemoveDestinationCommand command) throws IOException {
1248                process(command, location);
1249            }
1250
1251            @Override
1252            public void visit(KahaSubscriptionCommand command) throws IOException {
1253                process(command, location);
1254            }
1255
1256            @Override
1257            public void visit(KahaProducerAuditCommand command) throws IOException {
1258                processLocation(location);
1259            }
1260
1261            @Override
1262            public void visit(KahaAckMessageFileMapCommand command) throws IOException {
1263                processLocation(location);
1264            }
1265
1266            @Override
1267            public void visit(KahaTraceCommand command) {
1268                processLocation(location);
1269            }
1270
1271            @Override
1272            public void visit(KahaUpdateMessageCommand command) throws IOException {
1273                process(command, location);
1274            }
1275
1276            @Override
1277            public void visit(KahaRewrittenDataFileCommand command) throws IOException {
1278                process(command, location);
1279            }
1280        });
1281    }
1282
1283    @SuppressWarnings("rawtypes")
1284    protected void process(final KahaAddMessageCommand command, final Location location, final IndexAware runWithIndexLock) throws IOException {
1285        if (command.hasTransactionInfo()) {
1286            List<Operation> inflightTx = getInflightTx(command.getTransactionInfo());
1287            inflightTx.add(new AddOperation(command, location, runWithIndexLock));
1288        } else {
1289            this.indexLock.writeLock().lock();
1290            try {
1291                pageFile.tx().execute(new Transaction.Closure<IOException>() {
1292                    @Override
1293                    public void execute(Transaction tx) throws IOException {
1294                        long assignedIndex = updateIndex(tx, command, location);
1295                        if (runWithIndexLock != null) {
1296                            runWithIndexLock.sequenceAssignedWithIndexLocked(assignedIndex);
1297                        }
1298                    }
1299                });
1300
1301            } finally {
1302                this.indexLock.writeLock().unlock();
1303            }
1304        }
1305    }
1306
1307    protected void process(final KahaUpdateMessageCommand command, final Location location) throws IOException {
1308        this.indexLock.writeLock().lock();
1309        try {
1310            pageFile.tx().execute(new Transaction.Closure<IOException>() {
1311                @Override
1312                public void execute(Transaction tx) throws IOException {
1313                    updateIndex(tx, command, location);
1314                }
1315            });
1316        } finally {
1317            this.indexLock.writeLock().unlock();
1318        }
1319    }
1320
1321    @SuppressWarnings("rawtypes")
1322    protected void process(final KahaRemoveMessageCommand command, final Location location) throws IOException {
1323        if (command.hasTransactionInfo()) {
1324           List<Operation> inflightTx = getInflightTx(command.getTransactionInfo());
1325           inflightTx.add(new RemoveOperation(command, location));
1326        } else {
1327            this.indexLock.writeLock().lock();
1328            try {
1329                pageFile.tx().execute(new Transaction.Closure<IOException>() {
1330                    @Override
1331                    public void execute(Transaction tx) throws IOException {
1332                        updateIndex(tx, command, location);
1333                    }
1334                });
1335            } finally {
1336                this.indexLock.writeLock().unlock();
1337            }
1338        }
1339    }
1340
1341    protected void process(final KahaRemoveDestinationCommand command, final Location location) throws IOException {
1342        this.indexLock.writeLock().lock();
1343        try {
1344            pageFile.tx().execute(new Transaction.Closure<IOException>() {
1345                @Override
1346                public void execute(Transaction tx) throws IOException {
1347                    updateIndex(tx, command, location);
1348                }
1349            });
1350        } finally {
1351            this.indexLock.writeLock().unlock();
1352        }
1353    }
1354
1355    protected void process(final KahaSubscriptionCommand command, final Location location) throws IOException {
1356        this.indexLock.writeLock().lock();
1357        try {
1358            pageFile.tx().execute(new Transaction.Closure<IOException>() {
1359                @Override
1360                public void execute(Transaction tx) throws IOException {
1361                    updateIndex(tx, command, location);
1362                }
1363            });
1364        } finally {
1365            this.indexLock.writeLock().unlock();
1366        }
1367    }
1368
1369    protected void processLocation(final Location location) {
1370        this.indexLock.writeLock().lock();
1371        try {
1372            metadata.lastUpdate = location;
1373        } finally {
1374            this.indexLock.writeLock().unlock();
1375        }
1376    }
1377
1378    @SuppressWarnings("rawtypes")
1379    protected void process(KahaCommitCommand command, final Location location, final IndexAware before) throws IOException {
1380        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
1381        List<Operation> inflightTx;
1382        synchronized (inflightTransactions) {
1383            inflightTx = inflightTransactions.remove(key);
1384            if (inflightTx == null) {
1385                inflightTx = preparedTransactions.remove(key);
1386            }
1387        }
1388        if (inflightTx == null) {
1389            // only non persistent messages in this tx
1390            if (before != null) {
1391                before.sequenceAssignedWithIndexLocked(-1);
1392            }
1393            return;
1394        }
1395
1396        final List<Operation> messagingTx = inflightTx;
1397        indexLock.writeLock().lock();
1398        try {
1399            pageFile.tx().execute(new Transaction.Closure<IOException>() {
1400                @Override
1401                public void execute(Transaction tx) throws IOException {
1402                    for (Operation op : messagingTx) {
1403                        op.execute(tx);
1404                        recordAckMessageReferenceLocation(location, op.getLocation());
1405                    }
1406                }
1407            });
1408            metadata.lastUpdate = location;
1409        } finally {
1410            indexLock.writeLock().unlock();
1411        }
1412    }
1413
1414    @SuppressWarnings("rawtypes")
1415    protected void process(KahaPrepareCommand command, Location location) {
1416        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
1417        List<Operation> tx = null;
1418        synchronized (inflightTransactions) {
1419            tx = inflightTransactions.remove(key);
1420            if (tx != null) {
1421                preparedTransactions.put(key, tx);
1422            }
1423        }
1424        if (tx != null && !tx.isEmpty()) {
1425            indexLock.writeLock().lock();
1426            try {
1427                for (Operation op : tx) {
1428                    recordAckMessageReferenceLocation(location, op.getLocation());
1429                }
1430            } finally {
1431                indexLock.writeLock().unlock();
1432            }
1433        }
1434    }
1435
1436    @SuppressWarnings("rawtypes")
1437    protected void process(KahaRollbackCommand command, Location location)  throws IOException {
1438        TransactionId key = TransactionIdConversion.convert(command.getTransactionInfo());
1439        List<Operation> updates = null;
1440        synchronized (inflightTransactions) {
1441            updates = inflightTransactions.remove(key);
1442            if (updates == null) {
1443                updates = preparedTransactions.remove(key);
1444            }
1445        }
1446        if (key.isXATransaction() && updates != null && !updates.isEmpty()) {
1447            indexLock.writeLock().lock();
1448            try {
1449                for (Operation op : updates) {
1450                    recordAckMessageReferenceLocation(location, op.getLocation());
1451                }
1452            } finally {
1453                indexLock.writeLock().unlock();
1454            }
1455        }
1456    }
1457
1458    protected void process(KahaRewrittenDataFileCommand command, Location location)  throws IOException {
1459        final TreeSet<Integer> completeFileSet = new TreeSet<>(journal.getFileMap().keySet());
1460
1461        // Mark the current journal file as a compacted file so that gc checks can skip
1462        // over logs that are smaller compaction type logs.
1463        DataFile current = journal.getDataFileById(location.getDataFileId());
1464        current.setTypeCode(command.getRewriteType());
1465
1466        if (completeFileSet.contains(command.getSourceDataFileId()) && command.getSkipIfSourceExists()) {
1467            // Move offset so that next location read jumps to next file.
1468            location.setOffset(journalMaxFileLength);
1469        }
1470    }
1471
1472    // /////////////////////////////////////////////////////////////////
1473    // These methods do the actual index updates.
1474    // /////////////////////////////////////////////////////////////////
1475
1476    protected final ReentrantReadWriteLock indexLock = new ReentrantReadWriteLock();
1477    private final HashSet<Integer> journalFilesBeingReplicated = new HashSet<>();
1478
1479    long updateIndex(Transaction tx, KahaAddMessageCommand command, Location location) throws IOException {
1480        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
1481
1482        // Skip adding the message to the index if this is a topic and there are
1483        // no subscriptions.
1484        if (sd.subscriptions != null && sd.subscriptions.isEmpty(tx)) {
1485            return -1;
1486        }
1487
1488        // Add the message.
1489        int priority = command.getPrioritySupported() ? command.getPriority() : javax.jms.Message.DEFAULT_PRIORITY;
1490        long id = sd.orderIndex.getNextMessageId();
1491        Long previous = sd.locationIndex.put(tx, location, id);
1492        if (previous == null) {
1493            previous = sd.messageIdIndex.put(tx, command.getMessageId(), id);
1494            if (previous == null) {
1495                incrementAndAddSizeToStoreStat(command.getDestination(), location.getSize());
1496                sd.orderIndex.put(tx, priority, id, new MessageKeys(command.getMessageId(), location));
1497                if (sd.subscriptions != null && !sd.subscriptions.isEmpty(tx)) {
1498                    addAckLocationForNewMessage(tx, command.getDestination(), sd, id);
1499                }
1500                metadata.lastUpdate = location;
1501            } else {
1502
1503                MessageKeys messageKeys = sd.orderIndex.get(tx, previous);
1504                if (messageKeys != null && messageKeys.location.compareTo(location) < 0) {
1505                    // If the message ID is indexed, then the broker asked us to store a duplicate before the message was dispatched and acked, we ignore this add attempt
1506                    LOG.warn("Duplicate message add attempt rejected. Destination: {}://{}, Message id: {}", command.getDestination().getType(), command.getDestination().getName(), command.getMessageId());
1507                }
1508                sd.messageIdIndex.put(tx, command.getMessageId(), previous);
1509                sd.locationIndex.remove(tx, location);
1510                id = -1;
1511            }
1512        } else {
1513            // restore the previous value.. Looks like this was a redo of a previously
1514            // added message. We don't want to assign it a new id as the other indexes would
1515            // be wrong..
1516            sd.locationIndex.put(tx, location, previous);
1517            // ensure sequence is not broken
1518            sd.orderIndex.revertNextMessageId();
1519            metadata.lastUpdate = location;
1520        }
1521        // record this id in any event, initial send or recovery
1522        metadata.producerSequenceIdTracker.isDuplicate(command.getMessageId());
1523
1524       return id;
1525    }
1526
1527    void trackPendingAdd(KahaDestination destination, Long seq) {
1528        StoredDestination sd = storedDestinations.get(key(destination));
1529        if (sd != null) {
1530            sd.trackPendingAdd(seq);
1531        }
1532    }
1533
1534    void trackPendingAddComplete(KahaDestination destination, Long seq) {
1535        StoredDestination sd = storedDestinations.get(key(destination));
1536        if (sd != null) {
1537            sd.trackPendingAddComplete(seq);
1538        }
1539    }
1540
1541    void updateIndex(Transaction tx, KahaUpdateMessageCommand updateMessageCommand, Location location) throws IOException {
1542        KahaAddMessageCommand command = updateMessageCommand.getMessage();
1543        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
1544
1545        Long id = sd.messageIdIndex.get(tx, command.getMessageId());
1546        if (id != null) {
1547            MessageKeys previousKeys = sd.orderIndex.put(
1548                    tx,
1549                    command.getPrioritySupported() ? command.getPriority() : javax.jms.Message.DEFAULT_PRIORITY,
1550                    id,
1551                    new MessageKeys(command.getMessageId(), location)
1552            );
1553            sd.locationIndex.put(tx, location, id);
1554            incrementAndAddSizeToStoreStat(command.getDestination(), location.getSize());
1555
1556            if (previousKeys != null) {
1557                //Remove the existing from the size
1558                decrementAndSubSizeToStoreStat(command.getDestination(), previousKeys.location.getSize());
1559
1560                //update all the subscription metrics
1561                if (enableSubscriptionStatistics && sd.ackPositions != null && location.getSize() != previousKeys.location.getSize()) {
1562                    Iterator<Entry<String, SequenceSet>> iter = sd.ackPositions.iterator(tx);
1563                    while (iter.hasNext()) {
1564                        Entry<String, SequenceSet> e = iter.next();
1565                        if (e.getValue().contains(id)) {
1566                            incrementAndAddSizeToStoreStat(key(command.getDestination()), e.getKey(), location.getSize());
1567                            decrementAndSubSizeToStoreStat(key(command.getDestination()), e.getKey(), previousKeys.location.getSize());
1568                        }
1569                    }
1570                }
1571
1572                // on first update previous is original location, on recovery/replay it may be the updated location
1573                if(!previousKeys.location.equals(location)) {
1574                    sd.locationIndex.remove(tx, previousKeys.location);
1575                }
1576            }
1577            metadata.lastUpdate = location;
1578        } else {
1579            //Add the message if it can't be found
1580            this.updateIndex(tx, command, location);
1581        }
1582    }
1583
1584    void updateIndex(Transaction tx, KahaRemoveMessageCommand command, Location ackLocation) throws IOException {
1585        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
1586        if (!command.hasSubscriptionKey()) {
1587
1588            // In the queue case we just remove the message from the index..
1589            Long sequenceId = sd.messageIdIndex.remove(tx, command.getMessageId());
1590            if (sequenceId != null) {
1591                MessageKeys keys = sd.orderIndex.remove(tx, sequenceId);
1592                if (keys != null) {
1593                    sd.locationIndex.remove(tx, keys.location);
1594                    decrementAndSubSizeToStoreStat(command.getDestination(), keys.location.getSize());
1595                    recordAckMessageReferenceLocation(ackLocation, keys.location);
1596                    metadata.lastUpdate = ackLocation;
1597                }  else if (LOG.isDebugEnabled()) {
1598                    LOG.debug("message not found in order index: " + sequenceId  + " for: " + command.getMessageId());
1599                }
1600            } else if (LOG.isDebugEnabled()) {
1601                LOG.debug("message not found in sequence id index: " + command.getMessageId());
1602            }
1603        } else {
1604            // In the topic case we need remove the message once it's been acked
1605            // by all the subs
1606            Long sequence = sd.messageIdIndex.get(tx, command.getMessageId());
1607
1608            // Make sure it's a valid message id...
1609            if (sequence != null) {
1610                String subscriptionKey = command.getSubscriptionKey();
1611                if (command.getAck() != UNMATCHED) {
1612                    sd.orderIndex.get(tx, sequence);
1613                    byte priority = sd.orderIndex.lastGetPriority();
1614                    sd.subscriptionAcks.put(tx, subscriptionKey, new LastAck(sequence, priority));
1615                }
1616
1617                MessageKeys keys = sd.orderIndex.get(tx, sequence);
1618                if (keys != null) {
1619                    recordAckMessageReferenceLocation(ackLocation, keys.location);
1620                }
1621                // The following method handles deleting un-referenced messages.
1622                removeAckLocation(command, tx, sd, subscriptionKey, sequence);
1623                metadata.lastUpdate = ackLocation;
1624            } else if (LOG.isDebugEnabled()) {
1625                LOG.debug("on ack, no message sequence exists for id: " + command.getMessageId() + " and sub: " + command.getSubscriptionKey());
1626            }
1627
1628        }
1629    }
1630
1631    private void recordAckMessageReferenceLocation(Location ackLocation, Location messageLocation) {
1632        Set<Integer> referenceFileIds = metadata.ackMessageFileMap.get(Integer.valueOf(ackLocation.getDataFileId()));
1633        if (referenceFileIds == null) {
1634            referenceFileIds = new HashSet<>();
1635            referenceFileIds.add(messageLocation.getDataFileId());
1636            metadata.ackMessageFileMap.put(ackLocation.getDataFileId(), referenceFileIds);
1637        } else {
1638            Integer id = Integer.valueOf(messageLocation.getDataFileId());
1639            if (!referenceFileIds.contains(id)) {
1640                referenceFileIds.add(id);
1641            }
1642        }
1643    }
1644
1645    void updateIndex(Transaction tx, KahaRemoveDestinationCommand command, Location location) throws IOException {
1646        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
1647        sd.orderIndex.remove(tx);
1648
1649        sd.locationIndex.clear(tx);
1650        sd.locationIndex.unload(tx);
1651        tx.free(sd.locationIndex.getPageId());
1652
1653        sd.messageIdIndex.clear(tx);
1654        sd.messageIdIndex.unload(tx);
1655        tx.free(sd.messageIdIndex.getPageId());
1656
1657        if (sd.subscriptions != null) {
1658            sd.subscriptions.clear(tx);
1659            sd.subscriptions.unload(tx);
1660            tx.free(sd.subscriptions.getPageId());
1661
1662            sd.subscriptionAcks.clear(tx);
1663            sd.subscriptionAcks.unload(tx);
1664            tx.free(sd.subscriptionAcks.getPageId());
1665
1666            sd.ackPositions.clear(tx);
1667            sd.ackPositions.unload(tx);
1668            tx.free(sd.ackPositions.getHeadPageId());
1669
1670            sd.subLocations.clear(tx);
1671            sd.subLocations.unload(tx);
1672            tx.free(sd.subLocations.getHeadPageId());
1673        }
1674
1675        String key = key(command.getDestination());
1676        storedDestinations.remove(key);
1677        metadata.destinations.remove(tx, key);
1678        clearStoreStats(command.getDestination());
1679        storeCache.remove(key(command.getDestination()));
1680    }
1681
1682    void updateIndex(Transaction tx, KahaSubscriptionCommand command, Location location) throws IOException {
1683        StoredDestination sd = getStoredDestination(command.getDestination(), tx);
1684        final String subscriptionKey = command.getSubscriptionKey();
1685
1686        // If set then we are creating it.. otherwise we are destroying the sub
1687        if (command.hasSubscriptionInfo()) {
1688            Location existing = sd.subLocations.get(tx, subscriptionKey);
1689            if (existing != null && existing.compareTo(location) == 0) {
1690                // replay on recovery, ignore
1691                LOG.trace("ignoring journal replay of replay of sub from: " + location);
1692                return;
1693            }
1694
1695            sd.subscriptions.put(tx, subscriptionKey, command);
1696            sd.subLocations.put(tx, subscriptionKey, location);
1697            long ackLocation=NOT_ACKED;
1698            if (!command.getRetroactive()) {
1699                ackLocation = sd.orderIndex.nextMessageId-1;
1700            } else {
1701                addAckLocationForRetroactiveSub(tx, sd, subscriptionKey);
1702            }
1703            sd.subscriptionAcks.put(tx, subscriptionKey, new LastAck(ackLocation));
1704            sd.subscriptionCache.add(subscriptionKey);
1705        } else {
1706            // delete the sub...
1707            sd.subscriptions.remove(tx, subscriptionKey);
1708            sd.subLocations.remove(tx, subscriptionKey);
1709            sd.subscriptionAcks.remove(tx, subscriptionKey);
1710            sd.subscriptionCache.remove(subscriptionKey);
1711            removeAckLocationsForSub(command, tx, sd, subscriptionKey);
1712            MessageStoreSubscriptionStatistics subStats = getSubStats(key(command.getDestination()));
1713            if (subStats != null) {
1714                subStats.removeSubscription(subscriptionKey);
1715            }
1716
1717            if (sd.subscriptions.isEmpty(tx)) {
1718                // remove the stored destination
1719                KahaRemoveDestinationCommand removeDestinationCommand = new KahaRemoveDestinationCommand();
1720                removeDestinationCommand.setDestination(command.getDestination());
1721                updateIndex(tx, removeDestinationCommand, null);
1722                clearStoreStats(command.getDestination());
1723            }
1724        }
1725    }
1726
1727    private void checkpointUpdate(final boolean cleanup) throws IOException {
1728        checkpointLock.writeLock().lock();
1729        try {
1730            this.indexLock.writeLock().lock();
1731            try {
1732                Set<Integer> filesToGc = pageFile.tx().execute(new Transaction.CallableClosure<Set<Integer>, IOException>() {
1733                    @Override
1734                    public Set<Integer> execute(Transaction tx) throws IOException {
1735                        return checkpointUpdate(tx, cleanup);
1736                    }
1737                });
1738                pageFile.flush();
1739                // after the index update such that partial removal does not leave dangling references in the index.
1740                journal.removeDataFiles(filesToGc);
1741            } finally {
1742                this.indexLock.writeLock().unlock();
1743            }
1744
1745        } finally {
1746            checkpointLock.writeLock().unlock();
1747        }
1748    }
1749
1750    /**
1751     * @param tx
1752     * @throws IOException
1753     */
1754    Set<Integer> checkpointUpdate(Transaction tx, boolean cleanup) throws IOException {
1755        MDC.put("activemq.persistenceDir", getDirectory().getName());
1756        LOG.debug("Checkpoint started.");
1757
1758        // reflect last update exclusive of current checkpoint
1759        Location lastUpdate = metadata.lastUpdate;
1760
1761        metadata.state = OPEN_STATE;
1762        metadata.producerSequenceIdTrackerLocation = checkpointProducerAudit();
1763        metadata.ackMessageFileMapLocation = checkpointAckMessageFileMap();
1764        Location[] inProgressTxRange = getInProgressTxLocationRange();
1765        metadata.firstInProgressTransactionLocation = inProgressTxRange[0];
1766        tx.store(metadata.page, metadataMarshaller, true);
1767
1768        final TreeSet<Integer> gcCandidateSet = new TreeSet<>();
1769        if (cleanup) {
1770
1771            final TreeSet<Integer> completeFileSet = new TreeSet<>(journal.getFileMap().keySet());
1772            gcCandidateSet.addAll(completeFileSet);
1773
1774            if (LOG.isTraceEnabled()) {
1775                LOG.trace("Last update: " + lastUpdate + ", full gc candidates set: " + gcCandidateSet);
1776            }
1777
1778            if (lastUpdate != null) {
1779                // we won't delete past the last update, ackCompaction journal can be a candidate in error
1780                gcCandidateSet.removeAll(new TreeSet<Integer>(gcCandidateSet.tailSet(lastUpdate.getDataFileId())));
1781            }
1782
1783            // Don't GC files under replication
1784            if( journalFilesBeingReplicated!=null ) {
1785                gcCandidateSet.removeAll(journalFilesBeingReplicated);
1786            }
1787
1788            if (metadata.producerSequenceIdTrackerLocation != null) {
1789                int dataFileId = metadata.producerSequenceIdTrackerLocation.getDataFileId();
1790                if (gcCandidateSet.contains(dataFileId) && gcCandidateSet.first() == dataFileId) {
1791                    // rewrite so we don't prevent gc
1792                    metadata.producerSequenceIdTracker.setModified(true);
1793                    if (LOG.isTraceEnabled()) {
1794                        LOG.trace("rewriting producerSequenceIdTracker:" + metadata.producerSequenceIdTrackerLocation);
1795                    }
1796                }
1797                gcCandidateSet.remove(dataFileId);
1798                if (LOG.isTraceEnabled()) {
1799                    LOG.trace("gc candidates after producerSequenceIdTrackerLocation:" + metadata.producerSequenceIdTrackerLocation + ", " + gcCandidateSet);
1800                }
1801            }
1802
1803            if (metadata.ackMessageFileMapLocation != null) {
1804                int dataFileId = metadata.ackMessageFileMapLocation.getDataFileId();
1805                gcCandidateSet.remove(dataFileId);
1806                if (LOG.isTraceEnabled()) {
1807                    LOG.trace("gc candidates after ackMessageFileMapLocation:" + metadata.ackMessageFileMapLocation + ", " + gcCandidateSet);
1808                }
1809            }
1810
1811            // Don't GC files referenced by in-progress tx
1812            if (inProgressTxRange[0] != null) {
1813                for (int pendingTx=inProgressTxRange[0].getDataFileId(); pendingTx <= inProgressTxRange[1].getDataFileId(); pendingTx++) {
1814                    gcCandidateSet.remove(pendingTx);
1815                }
1816            }
1817            if (LOG.isTraceEnabled()) {
1818                LOG.trace("gc candidates after in progress tx range:" + Arrays.asList(inProgressTxRange) + ", " + gcCandidateSet);
1819            }
1820
1821            // Go through all the destinations to see if any of them can remove GC candidates.
1822            for (Entry<String, StoredDestination> entry : storedDestinations.entrySet()) {
1823                if( gcCandidateSet.isEmpty() ) {
1824                    break;
1825                }
1826
1827                // Use a visitor to cut down the number of pages that we load
1828                entry.getValue().locationIndex.visit(tx, new BTreeVisitor<Location, Long>() {
1829                    int last=-1;
1830                    @Override
1831                    public boolean isInterestedInKeysBetween(Location first, Location second) {
1832                        if( first==null ) {
1833                            SortedSet<Integer> subset = gcCandidateSet.headSet(second.getDataFileId()+1);
1834                            if( !subset.isEmpty() && subset.last() == second.getDataFileId() ) {
1835                                subset.remove(second.getDataFileId());
1836                            }
1837                            return !subset.isEmpty();
1838                        } else if( second==null ) {
1839                            SortedSet<Integer> subset = gcCandidateSet.tailSet(first.getDataFileId());
1840                            if( !subset.isEmpty() && subset.first() == first.getDataFileId() ) {
1841                                subset.remove(first.getDataFileId());
1842                            }
1843                            return !subset.isEmpty();
1844                        } else {
1845                            SortedSet<Integer> subset = gcCandidateSet.subSet(first.getDataFileId(), second.getDataFileId()+1);
1846                            if( !subset.isEmpty() && subset.first() == first.getDataFileId() ) {
1847                                subset.remove(first.getDataFileId());
1848                            }
1849                            if( !subset.isEmpty() && subset.last() == second.getDataFileId() ) {
1850                                subset.remove(second.getDataFileId());
1851                            }
1852                            return !subset.isEmpty();
1853                        }
1854                    }
1855
1856                    @Override
1857                    public void visit(List<Location> keys, List<Long> values) {
1858                        for (Location l : keys) {
1859                            int fileId = l.getDataFileId();
1860                            if( last != fileId ) {
1861                                gcCandidateSet.remove(fileId);
1862                                last = fileId;
1863                            }
1864                        }
1865                    }
1866                });
1867
1868                // Durable Subscription
1869                if (entry.getValue().subLocations != null) {
1870                    Iterator<Entry<String, Location>> iter = entry.getValue().subLocations.iterator(tx);
1871                    while (iter.hasNext()) {
1872                        Entry<String, Location> subscription = iter.next();
1873                        int dataFileId = subscription.getValue().getDataFileId();
1874
1875                        // Move subscription along if it has no outstanding messages that need ack'd
1876                        // and its in the last log file in the journal.
1877                        if (!gcCandidateSet.isEmpty() && gcCandidateSet.first() == dataFileId) {
1878                            final StoredDestination destination = entry.getValue();
1879                            final String subscriptionKey = subscription.getKey();
1880                            SequenceSet pendingAcks = destination.ackPositions.get(tx, subscriptionKey);
1881
1882                            // When pending is size one that is the next message Id meaning there
1883                            // are no pending messages currently.
1884                            if (pendingAcks == null || pendingAcks.isEmpty() ||
1885                                (pendingAcks.size() == 1 && pendingAcks.getTail().range() == 1)) {
1886
1887                                if (LOG.isTraceEnabled()) {
1888                                    LOG.trace("Found candidate for rewrite: {} from file {}", entry.getKey(), dataFileId);
1889                                }
1890
1891                                final KahaSubscriptionCommand kahaSub =
1892                                    destination.subscriptions.get(tx, subscriptionKey);
1893                                destination.subLocations.put(
1894                                    tx, subscriptionKey, checkpointSubscriptionCommand(kahaSub));
1895
1896                                // Skips the remove from candidates if we rewrote the subscription
1897                                // in order to prevent duplicate subscription commands on recover.
1898                                // If another subscription is on the same file and isn't rewritten
1899                                // than it will remove the file from the set.
1900                                continue;
1901                            }
1902                        }
1903
1904                        gcCandidateSet.remove(dataFileId);
1905                    }
1906                }
1907
1908                if (LOG.isTraceEnabled()) {
1909                    LOG.trace("gc candidates after dest:" + entry.getKey() + ", " + gcCandidateSet);
1910                }
1911            }
1912
1913            // check we are not deleting file with ack for in-use journal files
1914            if (LOG.isTraceEnabled()) {
1915                LOG.trace("gc candidates: " + gcCandidateSet);
1916                LOG.trace("ackMessageFileMap: " +  metadata.ackMessageFileMap);
1917            }
1918
1919            boolean ackMessageFileMapMod = false;
1920            Iterator<Integer> candidates = gcCandidateSet.iterator();
1921            while (candidates.hasNext()) {
1922                Integer candidate = candidates.next();
1923                Set<Integer> referencedFileIds = metadata.ackMessageFileMap.get(candidate);
1924                if (referencedFileIds != null) {
1925                    for (Integer referencedFileId : referencedFileIds) {
1926                        if (completeFileSet.contains(referencedFileId) && !gcCandidateSet.contains(referencedFileId)) {
1927                            // active file that is not targeted for deletion is referenced so don't delete
1928                            candidates.remove();
1929                            break;
1930                        }
1931                    }
1932                    if (gcCandidateSet.contains(candidate)) {
1933                        ackMessageFileMapMod |= (metadata.ackMessageFileMap.remove(candidate) != null);
1934                    } else {
1935                        if (LOG.isTraceEnabled()) {
1936                            LOG.trace("not removing data file: " + candidate
1937                                    + " as contained ack(s) refer to referenced file: " + referencedFileIds);
1938                        }
1939                    }
1940                }
1941            }
1942
1943            if (!gcCandidateSet.isEmpty()) {
1944                LOG.debug("Cleanup removing the data files: {}", gcCandidateSet);
1945                for (Integer candidate : gcCandidateSet) {
1946                    for (Set<Integer> ackFiles : metadata.ackMessageFileMap.values()) {
1947                        ackMessageFileMapMod |= ackFiles.remove(candidate);
1948                    }
1949                }
1950                if (ackMessageFileMapMod) {
1951                    checkpointUpdate(tx, false);
1952                }
1953            } else if (isEnableAckCompaction()) {
1954                if (++checkPointCyclesWithNoGC >= getCompactAcksAfterNoGC()) {
1955                    // First check length of journal to make sure it makes sense to even try.
1956                    //
1957                    // If there is only one journal file with Acks in it we don't need to move
1958                    // it since it won't be chained to any later logs.
1959                    //
1960                    // If the logs haven't grown since the last time then we need to compact
1961                    // otherwise there seems to still be room for growth and we don't need to incur
1962                    // the overhead.  Depending on configuration this check can be avoided and
1963                    // Ack compaction will run any time the store has not GC'd a journal file in
1964                    // the configured amount of cycles.
1965                    if (metadata.ackMessageFileMap.size() > 1 &&
1966                        (journalLogOnLastCompactionCheck == journal.getCurrentDataFileId() || isCompactAcksIgnoresStoreGrowth())) {
1967
1968                        LOG.trace("No files GC'd checking if threshold to ACK compaction has been met.");
1969                        try {
1970                            scheduler.execute(new AckCompactionRunner());
1971                        } catch (Exception ex) {
1972                            LOG.warn("Error on queueing the Ack Compactor", ex);
1973                        }
1974                    } else {
1975                        LOG.trace("Journal activity detected, no Ack compaction scheduled.");
1976                    }
1977
1978                    checkPointCyclesWithNoGC = 0;
1979                } else {
1980                    LOG.trace("Not yet time to check for compaction: {} of {} cycles",
1981                              checkPointCyclesWithNoGC, getCompactAcksAfterNoGC());
1982                }
1983
1984                journalLogOnLastCompactionCheck = journal.getCurrentDataFileId();
1985            }
1986        }
1987        MDC.remove("activemq.persistenceDir");
1988
1989        LOG.debug("Checkpoint done.");
1990        return gcCandidateSet;
1991    }
1992
1993    private final class AckCompactionRunner implements Runnable {
1994
1995        @Override
1996        public void run() {
1997
1998            int journalToAdvance = -1;
1999            Set<Integer> journalLogsReferenced = new HashSet<>();
2000
2001            //flag to know whether the ack forwarding completed without an exception
2002            boolean forwarded = false;
2003
2004            try {
2005                //acquire the checkpoint lock to prevent other threads from
2006                //running a checkpoint while this is running
2007                //
2008                //Normally this task runs on the same executor as the checkpoint task
2009                //so this ack compaction runner wouldn't run at the same time as the checkpoint task.
2010                //
2011                //However, there are two cases where this isn't always true.
2012                //First, the checkpoint() method is public and can be called through the
2013                //PersistenceAdapter interface by someone at the same time this is running.
2014                //Second, a checkpoint is called during shutdown without using the executor.
2015                //
2016                //In the future it might be better to just remove the checkpointLock entirely
2017                //and only use the executor but this would need to be examined for any unintended
2018                //consequences
2019                checkpointLock.readLock().lock();
2020
2021                try {
2022
2023                    // Lock index to capture the ackMessageFileMap data
2024                    indexLock.writeLock().lock();
2025
2026                    // Map keys might not be sorted, find the earliest log file to forward acks
2027                    // from and move only those, future cycles can chip away at more as needed.
2028                    // We won't move files that are themselves rewritten on a previous compaction.
2029                    List<Integer> journalFileIds = new ArrayList<>(metadata.ackMessageFileMap.keySet());
2030                    Collections.sort(journalFileIds);
2031                    for (Integer journalFileId : journalFileIds) {
2032                        DataFile current = journal.getDataFileById(journalFileId);
2033                        if (current != null && current.getTypeCode() != COMPACTED_JOURNAL_FILE) {
2034                            journalToAdvance = journalFileId;
2035                            break;
2036                        }
2037                    }
2038
2039                    // Check if we found one, or if we only found the current file being written to.
2040                    if (journalToAdvance == -1 || journalToAdvance == journal.getCurrentDataFileId()) {
2041                        return;
2042                    }
2043
2044                    journalLogsReferenced.addAll(metadata.ackMessageFileMap.get(journalToAdvance));
2045
2046                } finally {
2047                    indexLock.writeLock().unlock();
2048                }
2049
2050                try {
2051                    // Background rewrite of the old acks
2052                    forwardAllAcks(journalToAdvance, journalLogsReferenced);
2053                    forwarded = true;
2054                } catch (IOException ioe) {
2055                    LOG.error("Forwarding of acks failed", ioe);
2056                    brokerService.handleIOException(ioe);
2057                } catch (Throwable e) {
2058                    LOG.error("Forwarding of acks failed", e);
2059                    brokerService.handleIOException(IOExceptionSupport.create(e));
2060                }
2061            } finally {
2062                checkpointLock.readLock().unlock();
2063            }
2064
2065            try {
2066                if (forwarded) {
2067                    // Checkpoint with changes from the ackMessageFileMap
2068                    checkpointUpdate(false);
2069                }
2070            } catch (IOException ioe) {
2071                LOG.error("Checkpoint failed", ioe);
2072                brokerService.handleIOException(ioe);
2073            } catch (Throwable e) {
2074                LOG.error("Checkpoint failed", e);
2075                brokerService.handleIOException(IOExceptionSupport.create(e));
2076            }
2077        }
2078    }
2079
2080    private void forwardAllAcks(Integer journalToRead, Set<Integer> journalLogsReferenced) throws IllegalStateException, IOException {
2081        LOG.trace("Attempting to move all acks in journal:{} to the front.", journalToRead);
2082
2083        DataFile forwardsFile = journal.reserveDataFile();
2084        forwardsFile.setTypeCode(COMPACTED_JOURNAL_FILE);
2085        LOG.trace("Reserved file for forwarded acks: {}", forwardsFile);
2086
2087        Map<Integer, Set<Integer>> updatedAckLocations = new HashMap<>();
2088
2089        try (TargetedDataFileAppender appender = new TargetedDataFileAppender(journal, forwardsFile);) {
2090            KahaRewrittenDataFileCommand compactionMarker = new KahaRewrittenDataFileCommand();
2091            compactionMarker.setSourceDataFileId(journalToRead);
2092            compactionMarker.setRewriteType(forwardsFile.getTypeCode());
2093
2094            ByteSequence payload = toByteSequence(compactionMarker);
2095            appender.storeItem(payload, Journal.USER_RECORD_TYPE, false);
2096            LOG.trace("Marked ack rewrites file as replacing file: {}", journalToRead);
2097
2098            final Location limit = new Location(journalToRead + 1, 0);
2099            Location nextLocation = getNextLocationForAckForward(new Location(journalToRead, 0), limit);
2100            while (nextLocation != null) {
2101                JournalCommand<?> command = null;
2102                try {
2103                    command = load(nextLocation);
2104                } catch (IOException ex) {
2105                    LOG.trace("Error loading command during ack forward: {}", nextLocation);
2106                }
2107
2108                if (shouldForward(command)) {
2109                    payload = toByteSequence(command);
2110                    Location location = appender.storeItem(payload, Journal.USER_RECORD_TYPE, false);
2111                    updatedAckLocations.put(location.getDataFileId(), journalLogsReferenced);
2112                }
2113
2114                nextLocation = getNextLocationForAckForward(nextLocation, limit);
2115            }
2116        }
2117
2118        LOG.trace("ACKS forwarded, updates for ack locations: {}", updatedAckLocations);
2119
2120        // Lock index while we update the ackMessageFileMap.
2121        indexLock.writeLock().lock();
2122
2123        // Update the ack map with the new locations of the acks
2124        for (Entry<Integer, Set<Integer>> entry : updatedAckLocations.entrySet()) {
2125            Set<Integer> referenceFileIds = metadata.ackMessageFileMap.get(entry.getKey());
2126            if (referenceFileIds == null) {
2127                referenceFileIds = new HashSet<>();
2128                referenceFileIds.addAll(entry.getValue());
2129                metadata.ackMessageFileMap.put(entry.getKey(), referenceFileIds);
2130            } else {
2131                referenceFileIds.addAll(entry.getValue());
2132            }
2133        }
2134
2135        // remove the old location data from the ack map so that the old journal log file can
2136        // be removed on next GC.
2137        metadata.ackMessageFileMap.remove(journalToRead);
2138
2139        indexLock.writeLock().unlock();
2140
2141        LOG.trace("ACK File Map following updates: {}", metadata.ackMessageFileMap);
2142    }
2143
2144    private boolean shouldForward(JournalCommand<?> command) {
2145        boolean result = false;
2146        if (command != null) {
2147            if (command instanceof KahaRemoveMessageCommand) {
2148                result = true;
2149            } else if (command instanceof KahaCommitCommand) {
2150                KahaCommitCommand kahaCommitCommand = (KahaCommitCommand) command;
2151                if (kahaCommitCommand.hasTransactionInfo() && kahaCommitCommand.getTransactionInfo().hasXaTransactionId()) {
2152                    result = true;
2153                }
2154            }
2155        }
2156        return result;
2157    }
2158
2159    private Location getNextLocationForAckForward(final Location nextLocation, final Location limit) {
2160        //getNextLocation() can throw an IOException, we should handle it and set
2161        //nextLocation to null and abort gracefully
2162        //Should not happen in the normal case
2163        Location location = null;
2164        try {
2165            location = journal.getNextLocation(nextLocation, limit);
2166        } catch (IOException e) {
2167            LOG.warn("Failed to load next journal location after: {}, reason: {}", nextLocation, e);
2168            if (LOG.isDebugEnabled()) {
2169                LOG.debug("Failed to load next journal location after: {}", nextLocation, e);
2170            }
2171        }
2172        return location;
2173    }
2174
2175    final Runnable nullCompletionCallback = new Runnable() {
2176        @Override
2177        public void run() {
2178        }
2179    };
2180
2181    private Location checkpointProducerAudit() throws IOException {
2182        if (metadata.producerSequenceIdTracker == null || metadata.producerSequenceIdTracker.modified()) {
2183            ByteArrayOutputStream baos = new ByteArrayOutputStream();
2184            ObjectOutputStream oout = new ObjectOutputStream(baos);
2185            oout.writeObject(metadata.producerSequenceIdTracker);
2186            oout.flush();
2187            oout.close();
2188            // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false
2189            Location location = store(new KahaProducerAuditCommand().setAudit(new Buffer(baos.toByteArray())), nullCompletionCallback);
2190            try {
2191                location.getLatch().await();
2192                if (location.getException().get() != null) {
2193                    throw location.getException().get();
2194                }
2195            } catch (InterruptedException e) {
2196                throw new InterruptedIOException(e.toString());
2197            }
2198            return location;
2199        }
2200        return metadata.producerSequenceIdTrackerLocation;
2201    }
2202
2203    private Location checkpointAckMessageFileMap() throws IOException {
2204        ByteArrayOutputStream baos = new ByteArrayOutputStream();
2205        ObjectOutputStream oout = new ObjectOutputStream(baos);
2206        oout.writeObject(metadata.ackMessageFileMap);
2207        oout.flush();
2208        oout.close();
2209        // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false
2210        Location location = store(new KahaAckMessageFileMapCommand().setAckMessageFileMap(new Buffer(baos.toByteArray())), nullCompletionCallback);
2211        try {
2212            location.getLatch().await();
2213        } catch (InterruptedException e) {
2214            throw new InterruptedIOException(e.toString());
2215        }
2216        return location;
2217    }
2218
2219    private Location checkpointSubscriptionCommand(KahaSubscriptionCommand subscription) throws IOException {
2220
2221        ByteSequence sequence = toByteSequence(subscription);
2222        Location location = journal.write(sequence, nullCompletionCallback) ;
2223
2224        try {
2225            location.getLatch().await();
2226        } catch (InterruptedException e) {
2227            throw new InterruptedIOException(e.toString());
2228        }
2229        return location;
2230    }
2231
2232    public HashSet<Integer> getJournalFilesBeingReplicated() {
2233        return journalFilesBeingReplicated;
2234    }
2235
2236    // /////////////////////////////////////////////////////////////////
2237    // StoredDestination related implementation methods.
2238    // /////////////////////////////////////////////////////////////////
2239
2240    protected final HashMap<String, StoredDestination> storedDestinations = new HashMap<>();
2241
2242    static class MessageKeys {
2243        final String messageId;
2244        final Location location;
2245
2246        public MessageKeys(String messageId, Location location) {
2247            this.messageId=messageId;
2248            this.location=location;
2249        }
2250
2251        @Override
2252        public String toString() {
2253            return "["+messageId+","+location+"]";
2254        }
2255    }
2256
2257    protected class MessageKeysMarshaller extends VariableMarshaller<MessageKeys> {
2258        final LocationSizeMarshaller locationSizeMarshaller = new LocationSizeMarshaller();
2259
2260        @Override
2261        public MessageKeys readPayload(DataInput dataIn) throws IOException {
2262            return new MessageKeys(dataIn.readUTF(), locationSizeMarshaller.readPayload(dataIn));
2263        }
2264
2265        @Override
2266        public void writePayload(MessageKeys object, DataOutput dataOut) throws IOException {
2267            dataOut.writeUTF(object.messageId);
2268            locationSizeMarshaller.writePayload(object.location, dataOut);
2269        }
2270    }
2271
2272    class LastAck {
2273        long lastAckedSequence;
2274        byte priority;
2275
2276        public LastAck(LastAck source) {
2277            this.lastAckedSequence = source.lastAckedSequence;
2278            this.priority = source.priority;
2279        }
2280
2281        public LastAck() {
2282            this.priority = MessageOrderIndex.HI;
2283        }
2284
2285        public LastAck(long ackLocation) {
2286            this.lastAckedSequence = ackLocation;
2287            this.priority = MessageOrderIndex.LO;
2288        }
2289
2290        public LastAck(long ackLocation, byte priority) {
2291            this.lastAckedSequence = ackLocation;
2292            this.priority = priority;
2293        }
2294
2295        @Override
2296        public String toString() {
2297            return "[" + lastAckedSequence + ":" + priority + "]";
2298        }
2299    }
2300
2301    protected class LastAckMarshaller implements Marshaller<LastAck> {
2302
2303        @Override
2304        public void writePayload(LastAck object, DataOutput dataOut) throws IOException {
2305            dataOut.writeLong(object.lastAckedSequence);
2306            dataOut.writeByte(object.priority);
2307        }
2308
2309        @Override
2310        public LastAck readPayload(DataInput dataIn) throws IOException {
2311            LastAck lastAcked = new LastAck();
2312            lastAcked.lastAckedSequence = dataIn.readLong();
2313            if (metadata.version >= 3) {
2314                lastAcked.priority = dataIn.readByte();
2315            }
2316            return lastAcked;
2317        }
2318
2319        @Override
2320        public int getFixedSize() {
2321            return 9;
2322        }
2323
2324        @Override
2325        public LastAck deepCopy(LastAck source) {
2326            return new LastAck(source);
2327        }
2328
2329        @Override
2330        public boolean isDeepCopySupported() {
2331            return true;
2332        }
2333    }
2334
2335    class StoredDestination {
2336
2337        MessageOrderIndex orderIndex = new MessageOrderIndex();
2338        BTreeIndex<Location, Long> locationIndex;
2339        BTreeIndex<String, Long> messageIdIndex;
2340
2341        // These bits are only set for Topics
2342        BTreeIndex<String, KahaSubscriptionCommand> subscriptions;
2343        BTreeIndex<String, LastAck> subscriptionAcks;
2344        HashMap<String, MessageOrderCursor> subscriptionCursors;
2345        ListIndex<String, SequenceSet> ackPositions;
2346        ListIndex<String, Location> subLocations;
2347
2348        // Transient data used to track which Messages are no longer needed.
2349        final HashSet<String> subscriptionCache = new LinkedHashSet<>();
2350
2351        public void trackPendingAdd(Long seq) {
2352            orderIndex.trackPendingAdd(seq);
2353        }
2354
2355        public void trackPendingAddComplete(Long seq) {
2356            orderIndex.trackPendingAddComplete(seq);
2357        }
2358
2359        @Override
2360        public String toString() {
2361            return "nextSeq:" + orderIndex.nextMessageId + ",lastRet:" + orderIndex.cursor + ",pending:" + orderIndex.pendingAdditions.size();
2362        }
2363    }
2364
2365    protected class StoredDestinationMarshaller extends VariableMarshaller<StoredDestination> {
2366
2367        final MessageKeysMarshaller messageKeysMarshaller = new MessageKeysMarshaller();
2368
2369        @Override
2370        public StoredDestination readPayload(final DataInput dataIn) throws IOException {
2371            final StoredDestination value = new StoredDestination();
2372            value.orderIndex.defaultPriorityIndex = new BTreeIndex<>(pageFile, dataIn.readLong());
2373            value.locationIndex = new BTreeIndex<>(pageFile, dataIn.readLong());
2374            value.messageIdIndex = new BTreeIndex<>(pageFile, dataIn.readLong());
2375
2376            if (dataIn.readBoolean()) {
2377                value.subscriptions = new BTreeIndex<>(pageFile, dataIn.readLong());
2378                value.subscriptionAcks = new BTreeIndex<>(pageFile, dataIn.readLong());
2379                if (metadata.version >= 4) {
2380                    value.ackPositions = new ListIndex<>(pageFile, dataIn.readLong());
2381                } else {
2382                    // upgrade
2383                    pageFile.tx().execute(new Transaction.Closure<IOException>() {
2384                        @Override
2385                        public void execute(Transaction tx) throws IOException {
2386                            LinkedHashMap<String, SequenceSet> temp = new LinkedHashMap<>();
2387
2388                            if (metadata.version >= 3) {
2389                                // migrate
2390                                BTreeIndex<Long, HashSet<String>> oldAckPositions =
2391                                        new BTreeIndex<>(pageFile, dataIn.readLong());
2392                                oldAckPositions.setKeyMarshaller(LongMarshaller.INSTANCE);
2393                                oldAckPositions.setValueMarshaller(HashSetStringMarshaller.INSTANCE);
2394                                oldAckPositions.load(tx);
2395
2396
2397                                // Do the initial build of the data in memory before writing into the store
2398                                // based Ack Positions List to avoid a lot of disk thrashing.
2399                                Iterator<Entry<Long, HashSet<String>>> iterator = oldAckPositions.iterator(tx);
2400                                while (iterator.hasNext()) {
2401                                    Entry<Long, HashSet<String>> entry = iterator.next();
2402
2403                                    for(String subKey : entry.getValue()) {
2404                                        SequenceSet pendingAcks = temp.get(subKey);
2405                                        if (pendingAcks == null) {
2406                                            pendingAcks = new SequenceSet();
2407                                            temp.put(subKey, pendingAcks);
2408                                        }
2409
2410                                        pendingAcks.add(entry.getKey());
2411                                    }
2412                                }
2413                            }
2414                            // Now move the pending messages to ack data into the store backed
2415                            // structure.
2416                            value.ackPositions = new ListIndex<>(pageFile, tx.allocate());
2417                            value.ackPositions.setKeyMarshaller(StringMarshaller.INSTANCE);
2418                            value.ackPositions.setValueMarshaller(SequenceSet.Marshaller.INSTANCE);
2419                            value.ackPositions.load(tx);
2420                            for(String subscriptionKey : temp.keySet()) {
2421                                value.ackPositions.put(tx, subscriptionKey, temp.get(subscriptionKey));
2422                            }
2423
2424                        }
2425                    });
2426                }
2427
2428                if (metadata.version >= 5) {
2429                    value.subLocations = new ListIndex<>(pageFile, dataIn.readLong());
2430                } else {
2431                    // upgrade
2432                    pageFile.tx().execute(new Transaction.Closure<IOException>() {
2433                        @Override
2434                        public void execute(Transaction tx) throws IOException {
2435                            value.subLocations = new ListIndex<>(pageFile, tx.allocate());
2436                            value.subLocations.setKeyMarshaller(StringMarshaller.INSTANCE);
2437                            value.subLocations.setValueMarshaller(LocationMarshaller.INSTANCE);
2438                            value.subLocations.load(tx);
2439                        }
2440                    });
2441                }
2442            }
2443            if (metadata.version >= 2) {
2444                value.orderIndex.lowPriorityIndex = new BTreeIndex<>(pageFile, dataIn.readLong());
2445                value.orderIndex.highPriorityIndex = new BTreeIndex<>(pageFile, dataIn.readLong());
2446            } else {
2447                // upgrade
2448                pageFile.tx().execute(new Transaction.Closure<IOException>() {
2449                    @Override
2450                    public void execute(Transaction tx) throws IOException {
2451                        value.orderIndex.lowPriorityIndex = new BTreeIndex<>(pageFile, tx.allocate());
2452                        value.orderIndex.lowPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
2453                        value.orderIndex.lowPriorityIndex.setValueMarshaller(messageKeysMarshaller);
2454                        value.orderIndex.lowPriorityIndex.load(tx);
2455
2456                        value.orderIndex.highPriorityIndex = new BTreeIndex<>(pageFile, tx.allocate());
2457                        value.orderIndex.highPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
2458                        value.orderIndex.highPriorityIndex.setValueMarshaller(messageKeysMarshaller);
2459                        value.orderIndex.highPriorityIndex.load(tx);
2460                    }
2461                });
2462            }
2463
2464            return value;
2465        }
2466
2467        @Override
2468        public void writePayload(StoredDestination value, DataOutput dataOut) throws IOException {
2469            dataOut.writeLong(value.orderIndex.defaultPriorityIndex.getPageId());
2470            dataOut.writeLong(value.locationIndex.getPageId());
2471            dataOut.writeLong(value.messageIdIndex.getPageId());
2472            if (value.subscriptions != null) {
2473                dataOut.writeBoolean(true);
2474                dataOut.writeLong(value.subscriptions.getPageId());
2475                dataOut.writeLong(value.subscriptionAcks.getPageId());
2476                dataOut.writeLong(value.ackPositions.getHeadPageId());
2477                dataOut.writeLong(value.subLocations.getHeadPageId());
2478            } else {
2479                dataOut.writeBoolean(false);
2480            }
2481            dataOut.writeLong(value.orderIndex.lowPriorityIndex.getPageId());
2482            dataOut.writeLong(value.orderIndex.highPriorityIndex.getPageId());
2483        }
2484    }
2485
2486    static class KahaSubscriptionCommandMarshaller extends VariableMarshaller<KahaSubscriptionCommand> {
2487        final static KahaSubscriptionCommandMarshaller INSTANCE = new KahaSubscriptionCommandMarshaller();
2488
2489        @Override
2490        public KahaSubscriptionCommand readPayload(DataInput dataIn) throws IOException {
2491            KahaSubscriptionCommand rc = new KahaSubscriptionCommand();
2492            rc.mergeFramed((InputStream)dataIn);
2493            return rc;
2494        }
2495
2496        @Override
2497        public void writePayload(KahaSubscriptionCommand object, DataOutput dataOut) throws IOException {
2498            object.writeFramed((OutputStream)dataOut);
2499        }
2500    }
2501
2502    protected StoredDestination getStoredDestination(KahaDestination destination, Transaction tx) throws IOException {
2503        String key = key(destination);
2504        StoredDestination rc = storedDestinations.get(key);
2505        if (rc == null) {
2506            boolean topic = destination.getType() == KahaDestination.DestinationType.TOPIC || destination.getType() == KahaDestination.DestinationType.TEMP_TOPIC;
2507            rc = loadStoredDestination(tx, key, topic);
2508            // Cache it. We may want to remove/unload destinations from the
2509            // cache that are not used for a while
2510            // to reduce memory usage.
2511            storedDestinations.put(key, rc);
2512        }
2513        return rc;
2514    }
2515
2516    protected StoredDestination getExistingStoredDestination(KahaDestination destination, Transaction tx) throws IOException {
2517        String key = key(destination);
2518        StoredDestination rc = storedDestinations.get(key);
2519        if (rc == null && metadata.destinations.containsKey(tx, key)) {
2520            rc = getStoredDestination(destination, tx);
2521        }
2522        return rc;
2523    }
2524
2525    /**
2526     * @param tx
2527     * @param key
2528     * @param topic
2529     * @return
2530     * @throws IOException
2531     */
2532    private StoredDestination loadStoredDestination(Transaction tx, String key, boolean topic) throws IOException {
2533        // Try to load the existing indexes..
2534        StoredDestination rc = metadata.destinations.get(tx, key);
2535        if (rc == null) {
2536            // Brand new destination.. allocate indexes for it.
2537            rc = new StoredDestination();
2538            rc.orderIndex.allocate(tx);
2539            rc.locationIndex = new BTreeIndex<>(pageFile, tx.allocate());
2540            rc.messageIdIndex = new BTreeIndex<>(pageFile, tx.allocate());
2541
2542            if (topic) {
2543                rc.subscriptions = new BTreeIndex<>(pageFile, tx.allocate());
2544                rc.subscriptionAcks = new BTreeIndex<>(pageFile, tx.allocate());
2545                rc.ackPositions = new ListIndex<>(pageFile, tx.allocate());
2546                rc.subLocations = new ListIndex<>(pageFile, tx.allocate());
2547            }
2548            metadata.destinations.put(tx, key, rc);
2549        }
2550
2551        // Configure the marshalers and load.
2552        rc.orderIndex.load(tx);
2553
2554        // Figure out the next key using the last entry in the destination.
2555        rc.orderIndex.configureLast(tx);
2556
2557        rc.locationIndex.setKeyMarshaller(new LocationSizeMarshaller());
2558        rc.locationIndex.setValueMarshaller(LongMarshaller.INSTANCE);
2559        rc.locationIndex.load(tx);
2560
2561        rc.messageIdIndex.setKeyMarshaller(StringMarshaller.INSTANCE);
2562        rc.messageIdIndex.setValueMarshaller(LongMarshaller.INSTANCE);
2563        rc.messageIdIndex.load(tx);
2564
2565        //go through an upgrade old index if older than version 6
2566        if (metadata.version < 6) {
2567            for (Iterator<Entry<Location, Long>> iterator = rc.locationIndex.iterator(tx); iterator.hasNext(); ) {
2568                Entry<Location, Long> entry = iterator.next();
2569                // modify so it is upgraded
2570                rc.locationIndex.put(tx, entry.getKey(), entry.getValue());
2571            }
2572            //upgrade the order index
2573            for (Iterator<Entry<Long, MessageKeys>> iterator = rc.orderIndex.iterator(tx); iterator.hasNext(); ) {
2574                Entry<Long, MessageKeys> entry = iterator.next();
2575                //call get so that the last priority is updated
2576                rc.orderIndex.get(tx, entry.getKey());
2577                rc.orderIndex.put(tx, rc.orderIndex.lastGetPriority(), entry.getKey(), entry.getValue());
2578            }
2579        }
2580
2581        // If it was a topic...
2582        if (topic) {
2583
2584            rc.subscriptions.setKeyMarshaller(StringMarshaller.INSTANCE);
2585            rc.subscriptions.setValueMarshaller(KahaSubscriptionCommandMarshaller.INSTANCE);
2586            rc.subscriptions.load(tx);
2587
2588            rc.subscriptionAcks.setKeyMarshaller(StringMarshaller.INSTANCE);
2589            rc.subscriptionAcks.setValueMarshaller(new LastAckMarshaller());
2590            rc.subscriptionAcks.load(tx);
2591
2592            rc.ackPositions.setKeyMarshaller(StringMarshaller.INSTANCE);
2593            rc.ackPositions.setValueMarshaller(SequenceSet.Marshaller.INSTANCE);
2594            rc.ackPositions.load(tx);
2595
2596            rc.subLocations.setKeyMarshaller(StringMarshaller.INSTANCE);
2597            rc.subLocations.setValueMarshaller(LocationMarshaller.INSTANCE);
2598            rc.subLocations.load(tx);
2599
2600            rc.subscriptionCursors = new HashMap<>();
2601
2602            if (metadata.version < 3) {
2603
2604                // on upgrade need to fill ackLocation with available messages past last ack
2605                for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext(); ) {
2606                    Entry<String, LastAck> entry = iterator.next();
2607                    for (Iterator<Entry<Long, MessageKeys>> orderIterator =
2608                            rc.orderIndex.iterator(tx, new MessageOrderCursor(entry.getValue().lastAckedSequence)); orderIterator.hasNext(); ) {
2609                        Long sequence = orderIterator.next().getKey();
2610                        addAckLocation(tx, rc, sequence, entry.getKey());
2611                    }
2612                    // modify so it is upgraded
2613                    rc.subscriptionAcks.put(tx, entry.getKey(), entry.getValue());
2614                }
2615            }
2616
2617            // Configure the message references index
2618
2619
2620            // Configure the subscription cache
2621            for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext(); ) {
2622                Entry<String, LastAck> entry = iterator.next();
2623                rc.subscriptionCache.add(entry.getKey());
2624            }
2625
2626            if (rc.orderIndex.nextMessageId == 0) {
2627                // check for existing durable sub all acked out - pull next seq from acks as messages are gone
2628                if (!rc.subscriptionAcks.isEmpty(tx)) {
2629                    for (Iterator<Entry<String, LastAck>> iterator = rc.subscriptionAcks.iterator(tx); iterator.hasNext();) {
2630                        Entry<String, LastAck> entry = iterator.next();
2631                        rc.orderIndex.nextMessageId =
2632                                Math.max(rc.orderIndex.nextMessageId, entry.getValue().lastAckedSequence +1);
2633                    }
2634                }
2635            } else {
2636                // update based on ackPositions for unmatched, last entry is always the next
2637                Iterator<Entry<String, SequenceSet>> subscriptions = rc.ackPositions.iterator(tx);
2638                while (subscriptions.hasNext()) {
2639                    Entry<String, SequenceSet> subscription = subscriptions.next();
2640                    SequenceSet pendingAcks = subscription.getValue();
2641                    if (pendingAcks != null && !pendingAcks.isEmpty()) {
2642                        for (Long sequenceId : pendingAcks) {
2643                            rc.orderIndex.nextMessageId = Math.max(rc.orderIndex.nextMessageId, sequenceId);
2644                        }
2645                    }
2646                }
2647            }
2648        }
2649
2650        if (metadata.version < VERSION) {
2651            // store again after upgrade
2652            metadata.destinations.put(tx, key, rc);
2653        }
2654        return rc;
2655    }
2656
2657    /**
2658     * Clear the counter for the destination, if one exists.
2659     *
2660     * @param kahaDestination
2661     */
2662    protected void clearStoreStats(KahaDestination kahaDestination) {
2663        String key = key(kahaDestination);
2664        MessageStoreStatistics storeStats = getStoreStats(key);
2665        MessageStoreSubscriptionStatistics subStats = getSubStats(key);
2666        if (storeStats != null) {
2667            storeStats.reset();
2668        }
2669        if (subStats != null) {
2670            subStats.reset();
2671        }
2672    }
2673
2674    /**
2675     * Update MessageStoreStatistics
2676     *
2677     * @param kahaDestination
2678     * @param size
2679     */
2680    protected void incrementAndAddSizeToStoreStat(KahaDestination kahaDestination, long size) {
2681        incrementAndAddSizeToStoreStat(key(kahaDestination), size);
2682    }
2683
2684    protected void incrementAndAddSizeToStoreStat(String kahaDestKey, long size) {
2685        MessageStoreStatistics storeStats = getStoreStats(kahaDestKey);
2686        if (storeStats != null) {
2687            storeStats.getMessageCount().increment();
2688            if (size > 0) {
2689                storeStats.getMessageSize().addSize(size);
2690            }
2691        }
2692    }
2693
2694    protected void decrementAndSubSizeToStoreStat(KahaDestination kahaDestination, long size) {
2695        decrementAndSubSizeToStoreStat(key(kahaDestination), size);
2696    }
2697
2698    protected void decrementAndSubSizeToStoreStat(String kahaDestKey, long size) {
2699        MessageStoreStatistics storeStats = getStoreStats(kahaDestKey);
2700        if (storeStats != null) {
2701            storeStats.getMessageCount().decrement();
2702            if (size > 0) {
2703                storeStats.getMessageSize().addSize(-size);
2704            }
2705        }
2706    }
2707
2708    protected void incrementAndAddSizeToStoreStat(KahaDestination kahaDestination, String subKey, long size) {
2709        incrementAndAddSizeToStoreStat(key(kahaDestination), subKey, size);
2710    }
2711
2712    protected void incrementAndAddSizeToStoreStat(String kahaDestKey, String subKey, long size) {
2713        if (enableSubscriptionStatistics) {
2714            MessageStoreSubscriptionStatistics subStats = getSubStats(kahaDestKey);
2715            if (subStats != null && subKey != null) {
2716                subStats.getMessageCount(subKey).increment();
2717                if (size > 0) {
2718                    subStats.getMessageSize(subKey).addSize(size);
2719                }
2720            }
2721        }
2722    }
2723
2724
2725    protected void decrementAndSubSizeToStoreStat(String kahaDestKey, String subKey, long size) {
2726        if (enableSubscriptionStatistics) {
2727            MessageStoreSubscriptionStatistics subStats = getSubStats(kahaDestKey);
2728            if (subStats != null && subKey != null) {
2729                subStats.getMessageCount(subKey).decrement();
2730                if (size > 0) {
2731                    subStats.getMessageSize(subKey).addSize(-size);
2732                }
2733            }
2734        }
2735    }
2736
2737    protected void decrementAndSubSizeToStoreStat(KahaDestination kahaDestination, String subKey, long size) {
2738        decrementAndSubSizeToStoreStat(key(kahaDestination), subKey, size);
2739    }
2740
2741    /**
2742     * This is a map to cache MessageStores for a specific
2743     * KahaDestination key
2744     */
2745    protected final ConcurrentMap<String, MessageStore> storeCache =
2746            new ConcurrentHashMap<>();
2747
2748    /**
2749     * Locate the storeMessageSize counter for this KahaDestination
2750     */
2751    protected MessageStoreStatistics getStoreStats(String kahaDestKey) {
2752        MessageStoreStatistics storeStats = null;
2753        try {
2754            MessageStore messageStore = storeCache.get(kahaDestKey);
2755            if (messageStore != null) {
2756                storeStats = messageStore.getMessageStoreStatistics();
2757            }
2758        } catch (Exception e1) {
2759             LOG.error("Getting size counter of destination failed", e1);
2760        }
2761
2762        return storeStats;
2763    }
2764
2765    protected MessageStoreSubscriptionStatistics getSubStats(String kahaDestKey) {
2766        MessageStoreSubscriptionStatistics subStats = null;
2767        try {
2768            MessageStore messageStore = storeCache.get(kahaDestKey);
2769            if (messageStore instanceof TopicMessageStore) {
2770                subStats = ((TopicMessageStore)messageStore).getMessageStoreSubStatistics();
2771            }
2772        } catch (Exception e1) {
2773             LOG.error("Getting size counter of destination failed", e1);
2774        }
2775
2776        return subStats;
2777    }
2778
2779    /**
2780     * Determine whether this Destination matches the DestinationType
2781     *
2782     * @param destination
2783     * @param type
2784     * @return
2785     */
2786    protected boolean matchType(Destination destination,
2787            KahaDestination.DestinationType type) {
2788        if (destination instanceof Topic
2789                && type.equals(KahaDestination.DestinationType.TOPIC)) {
2790            return true;
2791        } else if (destination instanceof Queue
2792                && type.equals(KahaDestination.DestinationType.QUEUE)) {
2793            return true;
2794        }
2795        return false;
2796    }
2797
2798    class LocationSizeMarshaller implements Marshaller<Location> {
2799
2800        public LocationSizeMarshaller() {
2801
2802        }
2803
2804        @Override
2805        public Location readPayload(DataInput dataIn) throws IOException {
2806            Location rc = new Location();
2807            rc.setDataFileId(dataIn.readInt());
2808            rc.setOffset(dataIn.readInt());
2809            if (metadata.version >= 6) {
2810                rc.setSize(dataIn.readInt());
2811            }
2812            return rc;
2813        }
2814
2815        @Override
2816        public void writePayload(Location object, DataOutput dataOut)
2817                throws IOException {
2818            dataOut.writeInt(object.getDataFileId());
2819            dataOut.writeInt(object.getOffset());
2820            dataOut.writeInt(object.getSize());
2821        }
2822
2823        @Override
2824        public int getFixedSize() {
2825            return 12;
2826        }
2827
2828        @Override
2829        public Location deepCopy(Location source) {
2830            return new Location(source);
2831        }
2832
2833        @Override
2834        public boolean isDeepCopySupported() {
2835            return true;
2836        }
2837    }
2838
2839    private void addAckLocation(Transaction tx, StoredDestination sd, Long messageSequence, String subscriptionKey) throws IOException {
2840        SequenceSet sequences = sd.ackPositions.get(tx, subscriptionKey);
2841        if (sequences == null) {
2842            sequences = new SequenceSet();
2843            sequences.add(messageSequence);
2844            sd.ackPositions.add(tx, subscriptionKey, sequences);
2845        } else {
2846            sequences.add(messageSequence);
2847            sd.ackPositions.put(tx, subscriptionKey, sequences);
2848        }
2849    }
2850
2851    // new sub is interested in potentially all existing messages
2852    private void addAckLocationForRetroactiveSub(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
2853        SequenceSet allOutstanding = new SequenceSet();
2854        Iterator<Map.Entry<String, SequenceSet>> iterator = sd.ackPositions.iterator(tx);
2855        while (iterator.hasNext()) {
2856            SequenceSet set = iterator.next().getValue();
2857            for (Long entry : set) {
2858                allOutstanding.add(entry);
2859            }
2860        }
2861        sd.ackPositions.put(tx, subscriptionKey, allOutstanding);
2862    }
2863
2864    // on a new message add, all existing subs are interested in this message
2865    private void addAckLocationForNewMessage(Transaction tx, KahaDestination kahaDest,
2866            StoredDestination sd, Long messageSequence) throws IOException {
2867        for(String subscriptionKey : sd.subscriptionCache) {
2868            SequenceSet sequences = sd.ackPositions.get(tx, subscriptionKey);
2869            if (sequences == null) {
2870                sequences = new SequenceSet();
2871                sequences.add(new Sequence(messageSequence, messageSequence + 1));
2872                sd.ackPositions.add(tx, subscriptionKey, sequences);
2873            } else {
2874                sequences.add(new Sequence(messageSequence, messageSequence + 1));
2875                sd.ackPositions.put(tx, subscriptionKey, sequences);
2876            }
2877
2878            MessageKeys key = sd.orderIndex.get(tx, messageSequence);
2879            incrementAndAddSizeToStoreStat(kahaDest, subscriptionKey, key.location.getSize());
2880        }
2881    }
2882
2883    private void removeAckLocationsForSub(KahaSubscriptionCommand command,
2884            Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
2885        if (!sd.ackPositions.isEmpty(tx)) {
2886            SequenceSet sequences = sd.ackPositions.remove(tx, subscriptionKey);
2887            if (sequences == null || sequences.isEmpty()) {
2888                return;
2889            }
2890
2891            ArrayList<Long> unreferenced = new ArrayList<>();
2892
2893            for(Long sequenceId : sequences) {
2894                if(!isSequenceReferenced(tx, sd, sequenceId)) {
2895                    unreferenced.add(sequenceId);
2896                }
2897            }
2898
2899            for(Long sequenceId : unreferenced) {
2900                // Find all the entries that need to get deleted.
2901                ArrayList<Entry<Long, MessageKeys>> deletes = new ArrayList<>();
2902                sd.orderIndex.getDeleteList(tx, deletes, sequenceId);
2903
2904                // Do the actual deletes.
2905                for (Entry<Long, MessageKeys> entry : deletes) {
2906                    sd.locationIndex.remove(tx, entry.getValue().location);
2907                    sd.messageIdIndex.remove(tx, entry.getValue().messageId);
2908                    sd.orderIndex.remove(tx, entry.getKey());
2909                    decrementAndSubSizeToStoreStat(command.getDestination(), entry.getValue().location.getSize());
2910                }
2911            }
2912        }
2913    }
2914
2915    private boolean isSequenceReferenced(final Transaction tx, final StoredDestination sd, final Long sequenceId) throws IOException {
2916        for(String subscriptionKey : sd.subscriptionCache) {
2917            SequenceSet sequence = sd.ackPositions.get(tx, subscriptionKey);
2918            if (sequence != null && sequence.contains(sequenceId)) {
2919                return true;
2920            }
2921        }
2922        return false;
2923    }
2924
2925    /**
2926     * @param tx
2927     * @param sd
2928     * @param subscriptionKey
2929     * @param messageSequence
2930     * @throws IOException
2931     */
2932    private void removeAckLocation(KahaRemoveMessageCommand command,
2933            Transaction tx, StoredDestination sd, String subscriptionKey,
2934            Long messageSequence) throws IOException {
2935        // Remove the sub from the previous location set..
2936        if (messageSequence != null) {
2937            SequenceSet range = sd.ackPositions.get(tx, subscriptionKey);
2938            if (range != null && !range.isEmpty()) {
2939                range.remove(messageSequence);
2940                if (!range.isEmpty()) {
2941                    sd.ackPositions.put(tx, subscriptionKey, range);
2942                } else {
2943                    sd.ackPositions.remove(tx, subscriptionKey);
2944                }
2945
2946                MessageKeys key = sd.orderIndex.get(tx, messageSequence);
2947                decrementAndSubSizeToStoreStat(command.getDestination(), subscriptionKey,
2948                        key.location.getSize());
2949
2950                // Check if the message is reference by any other subscription.
2951                if (isSequenceReferenced(tx, sd, messageSequence)) {
2952                    return;
2953                }
2954                // Find all the entries that need to get deleted.
2955                ArrayList<Entry<Long, MessageKeys>> deletes = new ArrayList<>();
2956                sd.orderIndex.getDeleteList(tx, deletes, messageSequence);
2957
2958                // Do the actual deletes.
2959                for (Entry<Long, MessageKeys> entry : deletes) {
2960                    sd.locationIndex.remove(tx, entry.getValue().location);
2961                    sd.messageIdIndex.remove(tx, entry.getValue().messageId);
2962                    sd.orderIndex.remove(tx, entry.getKey());
2963                    decrementAndSubSizeToStoreStat(command.getDestination(), entry.getValue().location.getSize());
2964                }
2965            }
2966        }
2967    }
2968
2969    public LastAck getLastAck(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
2970        return sd.subscriptionAcks.get(tx, subscriptionKey);
2971    }
2972
2973    protected long getStoredMessageCount(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
2974        if (sd.ackPositions != null) {
2975            SequenceSet messageSequences = sd.ackPositions.get(tx, subscriptionKey);
2976            if (messageSequences != null) {
2977                long result = messageSequences.rangeSize();
2978                // if there's anything in the range the last value is always the nextMessage marker, so remove 1.
2979                return result > 0 ? result - 1 : 0;
2980            }
2981        }
2982
2983        return 0;
2984    }
2985
2986    protected long getStoredMessageSize(Transaction tx, StoredDestination sd, String subscriptionKey) throws IOException {
2987        long locationSize = 0;
2988
2989        if (sd.ackPositions != null) {
2990            //grab the messages attached to this subscription
2991            SequenceSet messageSequences = sd.ackPositions.get(tx, subscriptionKey);
2992
2993            if (messageSequences != null) {
2994                Sequence head = messageSequences.getHead();
2995                if (head != null) {
2996                    //get an iterator over the order index starting at the first unacked message
2997                    //and go over each message to add up the size
2998                    Iterator<Entry<Long, MessageKeys>> iterator = sd.orderIndex.iterator(tx,
2999                            new MessageOrderCursor(head.getFirst()));
3000
3001                    while (iterator.hasNext()) {
3002                        Entry<Long, MessageKeys> entry = iterator.next();
3003                        locationSize += entry.getValue().location.getSize();
3004                    }
3005                }
3006            }
3007        }
3008
3009        return locationSize;
3010    }
3011
3012    protected String key(KahaDestination destination) {
3013        return destination.getType().getNumber() + ":" + destination.getName();
3014    }
3015
3016    // /////////////////////////////////////////////////////////////////
3017    // Transaction related implementation methods.
3018    // /////////////////////////////////////////////////////////////////
3019    @SuppressWarnings("rawtypes")
3020    private final LinkedHashMap<TransactionId, List<Operation>> inflightTransactions = new LinkedHashMap<>();
3021    @SuppressWarnings("rawtypes")
3022    protected final LinkedHashMap<TransactionId, List<Operation>> preparedTransactions = new LinkedHashMap<>();
3023    protected final Set<String> ackedAndPrepared = new HashSet<>();
3024    protected final Set<String> rolledBackAcks = new HashSet<>();
3025
3026    // messages that have prepared (pending) acks cannot be re-dispatched unless the outcome is rollback,
3027    // till then they are skipped by the store.
3028    // 'at most once' XA guarantee
3029    public void trackRecoveredAcks(ArrayList<MessageAck> acks) {
3030        this.indexLock.writeLock().lock();
3031        try {
3032            for (MessageAck ack : acks) {
3033                ackedAndPrepared.add(ack.getLastMessageId().toProducerKey());
3034            }
3035        } finally {
3036            this.indexLock.writeLock().unlock();
3037        }
3038    }
3039
3040    public void forgetRecoveredAcks(ArrayList<MessageAck> acks, boolean rollback) throws IOException {
3041        if (acks != null) {
3042            this.indexLock.writeLock().lock();
3043            try {
3044                for (MessageAck ack : acks) {
3045                    final String id = ack.getLastMessageId().toProducerKey();
3046                    ackedAndPrepared.remove(id);
3047                    if (rollback) {
3048                        rolledBackAcks.add(id);
3049                    }
3050                }
3051            } finally {
3052                this.indexLock.writeLock().unlock();
3053            }
3054        }
3055    }
3056
3057    @SuppressWarnings("rawtypes")
3058    private List<Operation> getInflightTx(KahaTransactionInfo info) {
3059        TransactionId key = TransactionIdConversion.convert(info);
3060        List<Operation> tx;
3061        synchronized (inflightTransactions) {
3062            tx = inflightTransactions.get(key);
3063            if (tx == null) {
3064                tx = Collections.synchronizedList(new ArrayList<Operation>());
3065                inflightTransactions.put(key, tx);
3066            }
3067        }
3068        return tx;
3069    }
3070
3071    @SuppressWarnings("unused")
3072    private TransactionId key(KahaTransactionInfo transactionInfo) {
3073        return TransactionIdConversion.convert(transactionInfo);
3074    }
3075
3076    abstract class Operation <T extends JournalCommand<T>> {
3077        final T command;
3078        final Location location;
3079
3080        public Operation(T command, Location location) {
3081            this.command = command;
3082            this.location = location;
3083        }
3084
3085        public Location getLocation() {
3086            return location;
3087        }
3088
3089        public T getCommand() {
3090            return command;
3091        }
3092
3093        abstract public void execute(Transaction tx) throws IOException;
3094    }
3095
3096    class AddOperation extends Operation<KahaAddMessageCommand> {
3097        final IndexAware runWithIndexLock;
3098        public AddOperation(KahaAddMessageCommand command, Location location, IndexAware runWithIndexLock) {
3099            super(command, location);
3100            this.runWithIndexLock = runWithIndexLock;
3101        }
3102
3103        @Override
3104        public void execute(Transaction tx) throws IOException {
3105            long seq = updateIndex(tx, command, location);
3106            if (runWithIndexLock != null) {
3107                runWithIndexLock.sequenceAssignedWithIndexLocked(seq);
3108            }
3109        }
3110    }
3111
3112    class RemoveOperation extends Operation<KahaRemoveMessageCommand> {
3113
3114        public RemoveOperation(KahaRemoveMessageCommand command, Location location) {
3115            super(command, location);
3116        }
3117
3118        @Override
3119        public void execute(Transaction tx) throws IOException {
3120            updateIndex(tx, command, location);
3121        }
3122    }
3123
3124    // /////////////////////////////////////////////////////////////////
3125    // Initialization related implementation methods.
3126    // /////////////////////////////////////////////////////////////////
3127
3128    private PageFile createPageFile() throws IOException {
3129        if (indexDirectory == null) {
3130            indexDirectory = directory;
3131        }
3132        IOHelper.mkdirs(indexDirectory);
3133        PageFile index = new PageFile(indexDirectory, "db");
3134        index.setEnableWriteThread(isEnableIndexWriteAsync());
3135        index.setWriteBatchSize(getIndexWriteBatchSize());
3136        index.setPageCacheSize(indexCacheSize);
3137        index.setUseLFRUEviction(isUseIndexLFRUEviction());
3138        index.setLFUEvictionFactor(getIndexLFUEvictionFactor());
3139        index.setEnableDiskSyncs(isEnableIndexDiskSyncs());
3140        index.setEnableRecoveryFile(isEnableIndexRecoveryFile());
3141        index.setEnablePageCaching(isEnableIndexPageCaching());
3142        return index;
3143    }
3144
3145    protected Journal createJournal() throws IOException {
3146        Journal manager = new Journal();
3147        manager.setDirectory(directory);
3148        manager.setMaxFileLength(getJournalMaxFileLength());
3149        manager.setCheckForCorruptionOnStartup(checkForCorruptJournalFiles);
3150        manager.setChecksum(checksumJournalFiles || checkForCorruptJournalFiles);
3151        manager.setWriteBatchSize(getJournalMaxWriteBatchSize());
3152        manager.setArchiveDataLogs(isArchiveDataLogs());
3153        manager.setSizeAccumulator(journalSize);
3154        manager.setEnableAsyncDiskSync(isEnableJournalDiskSyncs());
3155        manager.setPreallocationScope(Journal.PreallocationScope.valueOf(preallocationScope.trim().toUpperCase()));
3156        manager.setPreallocationStrategy(
3157                Journal.PreallocationStrategy.valueOf(preallocationStrategy.trim().toUpperCase()));
3158        manager.setJournalDiskSyncStrategy(journalDiskSyncStrategy);
3159        if (getDirectoryArchive() != null) {
3160            IOHelper.mkdirs(getDirectoryArchive());
3161            manager.setDirectoryArchive(getDirectoryArchive());
3162        }
3163        return manager;
3164    }
3165
3166    private Metadata createMetadata() {
3167        Metadata md = new Metadata();
3168        md.producerSequenceIdTracker.setAuditDepth(getFailoverProducersAuditDepth());
3169        md.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(getMaxFailoverProducersToTrack());
3170        return md;
3171    }
3172
3173    protected abstract void configureMetadata();
3174
3175    public int getJournalMaxWriteBatchSize() {
3176        return journalMaxWriteBatchSize;
3177    }
3178
3179    public void setJournalMaxWriteBatchSize(int journalMaxWriteBatchSize) {
3180        this.journalMaxWriteBatchSize = journalMaxWriteBatchSize;
3181    }
3182
3183    public File getDirectory() {
3184        return directory;
3185    }
3186
3187    public void setDirectory(File directory) {
3188        this.directory = directory;
3189    }
3190
3191    public boolean isDeleteAllMessages() {
3192        return deleteAllMessages;
3193    }
3194
3195    public void setDeleteAllMessages(boolean deleteAllMessages) {
3196        this.deleteAllMessages = deleteAllMessages;
3197    }
3198
3199    public void setIndexWriteBatchSize(int setIndexWriteBatchSize) {
3200        this.setIndexWriteBatchSize = setIndexWriteBatchSize;
3201    }
3202
3203    public int getIndexWriteBatchSize() {
3204        return setIndexWriteBatchSize;
3205    }
3206
3207    public void setEnableIndexWriteAsync(boolean enableIndexWriteAsync) {
3208        this.enableIndexWriteAsync = enableIndexWriteAsync;
3209    }
3210
3211    boolean isEnableIndexWriteAsync() {
3212        return enableIndexWriteAsync;
3213    }
3214
3215    /**
3216     * @deprecated use {@link #getJournalDiskSyncStrategyEnum} or {@link #getJournalDiskSyncStrategy} instead
3217     * @return
3218     */
3219    @Deprecated
3220    public boolean isEnableJournalDiskSyncs() {
3221        return journalDiskSyncStrategy == JournalDiskSyncStrategy.ALWAYS;
3222    }
3223
3224    /**
3225     * @deprecated use {@link #setEnableJournalDiskSyncs} instead
3226     * @param syncWrites
3227     */
3228    @Deprecated
3229    public void setEnableJournalDiskSyncs(boolean syncWrites) {
3230        if (syncWrites) {
3231            journalDiskSyncStrategy = JournalDiskSyncStrategy.ALWAYS;
3232        } else {
3233            journalDiskSyncStrategy = JournalDiskSyncStrategy.NEVER;
3234        }
3235    }
3236
3237    public JournalDiskSyncStrategy getJournalDiskSyncStrategyEnum() {
3238        return journalDiskSyncStrategy;
3239    }
3240
3241    public String getJournalDiskSyncStrategy() {
3242        return journalDiskSyncStrategy.name();
3243    }
3244
3245    public void setJournalDiskSyncStrategy(String journalDiskSyncStrategy) {
3246        this.journalDiskSyncStrategy = JournalDiskSyncStrategy.valueOf(journalDiskSyncStrategy.trim().toUpperCase());
3247    }
3248
3249    public long getJournalDiskSyncInterval() {
3250        return journalDiskSyncInterval;
3251    }
3252
3253    public void setJournalDiskSyncInterval(long journalDiskSyncInterval) {
3254        this.journalDiskSyncInterval = journalDiskSyncInterval;
3255    }
3256
3257    public long getCheckpointInterval() {
3258        return checkpointInterval;
3259    }
3260
3261    public void setCheckpointInterval(long checkpointInterval) {
3262        this.checkpointInterval = checkpointInterval;
3263    }
3264
3265    public long getCleanupInterval() {
3266        return cleanupInterval;
3267    }
3268
3269    public void setCleanupInterval(long cleanupInterval) {
3270        this.cleanupInterval = cleanupInterval;
3271    }
3272
3273    public void setJournalMaxFileLength(int journalMaxFileLength) {
3274        this.journalMaxFileLength = journalMaxFileLength;
3275    }
3276
3277    public int getJournalMaxFileLength() {
3278        return journalMaxFileLength;
3279    }
3280
3281    public void setMaxFailoverProducersToTrack(int maxFailoverProducersToTrack) {
3282        this.metadata.producerSequenceIdTracker.setMaximumNumberOfProducersToTrack(maxFailoverProducersToTrack);
3283    }
3284
3285    public int getMaxFailoverProducersToTrack() {
3286        return this.metadata.producerSequenceIdTracker.getMaximumNumberOfProducersToTrack();
3287    }
3288
3289    public void setFailoverProducersAuditDepth(int failoverProducersAuditDepth) {
3290        this.metadata.producerSequenceIdTracker.setAuditDepth(failoverProducersAuditDepth);
3291    }
3292
3293    public int getFailoverProducersAuditDepth() {
3294        return this.metadata.producerSequenceIdTracker.getAuditDepth();
3295    }
3296
3297    public PageFile getPageFile() throws IOException {
3298        if (pageFile == null) {
3299            pageFile = createPageFile();
3300        }
3301        return pageFile;
3302    }
3303
3304    public Journal getJournal() throws IOException {
3305        if (journal == null) {
3306            journal = createJournal();
3307        }
3308        return journal;
3309    }
3310
3311    protected Metadata getMetadata() {
3312        return metadata;
3313    }
3314
3315    public boolean isFailIfDatabaseIsLocked() {
3316        return failIfDatabaseIsLocked;
3317    }
3318
3319    public void setFailIfDatabaseIsLocked(boolean failIfDatabaseIsLocked) {
3320        this.failIfDatabaseIsLocked = failIfDatabaseIsLocked;
3321    }
3322
3323    public boolean isIgnoreMissingJournalfiles() {
3324        return ignoreMissingJournalfiles;
3325    }
3326
3327    public void setIgnoreMissingJournalfiles(boolean ignoreMissingJournalfiles) {
3328        this.ignoreMissingJournalfiles = ignoreMissingJournalfiles;
3329    }
3330
3331    public int getIndexCacheSize() {
3332        return indexCacheSize;
3333    }
3334
3335    public void setIndexCacheSize(int indexCacheSize) {
3336        this.indexCacheSize = indexCacheSize;
3337    }
3338
3339    public boolean isCheckForCorruptJournalFiles() {
3340        return checkForCorruptJournalFiles;
3341    }
3342
3343    public void setCheckForCorruptJournalFiles(boolean checkForCorruptJournalFiles) {
3344        this.checkForCorruptJournalFiles = checkForCorruptJournalFiles;
3345    }
3346
3347    public PurgeRecoveredXATransactionStrategy getPurgeRecoveredXATransactionStrategyEnum() {
3348        return purgeRecoveredXATransactionStrategy;
3349    }
3350
3351    public String getPurgeRecoveredXATransactionStrategy() {
3352        return purgeRecoveredXATransactionStrategy.name();
3353    }
3354
3355    public void setPurgeRecoveredXATransactionStrategy(String purgeRecoveredXATransactionStrategy) {
3356        this.purgeRecoveredXATransactionStrategy = PurgeRecoveredXATransactionStrategy.valueOf(
3357                purgeRecoveredXATransactionStrategy.trim().toUpperCase());
3358    }
3359
3360    public boolean isChecksumJournalFiles() {
3361        return checksumJournalFiles;
3362    }
3363
3364    public void setChecksumJournalFiles(boolean checksumJournalFiles) {
3365        this.checksumJournalFiles = checksumJournalFiles;
3366    }
3367
3368    @Override
3369    public void setBrokerService(BrokerService brokerService) {
3370        this.brokerService = brokerService;
3371    }
3372
3373    /**
3374     * @return the archiveDataLogs
3375     */
3376    public boolean isArchiveDataLogs() {
3377        return this.archiveDataLogs;
3378    }
3379
3380    /**
3381     * @param archiveDataLogs the archiveDataLogs to set
3382     */
3383    public void setArchiveDataLogs(boolean archiveDataLogs) {
3384        this.archiveDataLogs = archiveDataLogs;
3385    }
3386
3387    /**
3388     * @return the directoryArchive
3389     */
3390    public File getDirectoryArchive() {
3391        return this.directoryArchive;
3392    }
3393
3394    /**
3395     * @param directoryArchive the directoryArchive to set
3396     */
3397    public void setDirectoryArchive(File directoryArchive) {
3398        this.directoryArchive = directoryArchive;
3399    }
3400
3401    public boolean isArchiveCorruptedIndex() {
3402        return archiveCorruptedIndex;
3403    }
3404
3405    public void setArchiveCorruptedIndex(boolean archiveCorruptedIndex) {
3406        this.archiveCorruptedIndex = archiveCorruptedIndex;
3407    }
3408
3409    public float getIndexLFUEvictionFactor() {
3410        return indexLFUEvictionFactor;
3411    }
3412
3413    public void setIndexLFUEvictionFactor(float indexLFUEvictionFactor) {
3414        this.indexLFUEvictionFactor = indexLFUEvictionFactor;
3415    }
3416
3417    public boolean isUseIndexLFRUEviction() {
3418        return useIndexLFRUEviction;
3419    }
3420
3421    public void setUseIndexLFRUEviction(boolean useIndexLFRUEviction) {
3422        this.useIndexLFRUEviction = useIndexLFRUEviction;
3423    }
3424
3425    public void setEnableIndexDiskSyncs(boolean enableIndexDiskSyncs) {
3426        this.enableIndexDiskSyncs = enableIndexDiskSyncs;
3427    }
3428
3429    public void setEnableIndexRecoveryFile(boolean enableIndexRecoveryFile) {
3430        this.enableIndexRecoveryFile = enableIndexRecoveryFile;
3431    }
3432
3433    public void setEnableIndexPageCaching(boolean enableIndexPageCaching) {
3434        this.enableIndexPageCaching = enableIndexPageCaching;
3435    }
3436
3437    public boolean isEnableIndexDiskSyncs() {
3438        return enableIndexDiskSyncs;
3439    }
3440
3441    public boolean isEnableIndexRecoveryFile() {
3442        return enableIndexRecoveryFile;
3443    }
3444
3445    public boolean isEnableIndexPageCaching() {
3446        return enableIndexPageCaching;
3447    }
3448
3449    // /////////////////////////////////////////////////////////////////
3450    // Internal conversion methods.
3451    // /////////////////////////////////////////////////////////////////
3452
3453    class MessageOrderCursor{
3454        long defaultCursorPosition;
3455        long lowPriorityCursorPosition;
3456        long highPriorityCursorPosition;
3457        MessageOrderCursor(){
3458        }
3459
3460        MessageOrderCursor(long position){
3461            this.defaultCursorPosition=position;
3462            this.lowPriorityCursorPosition=position;
3463            this.highPriorityCursorPosition=position;
3464        }
3465
3466        MessageOrderCursor(MessageOrderCursor other){
3467            this.defaultCursorPosition=other.defaultCursorPosition;
3468            this.lowPriorityCursorPosition=other.lowPriorityCursorPosition;
3469            this.highPriorityCursorPosition=other.highPriorityCursorPosition;
3470        }
3471
3472        MessageOrderCursor copy() {
3473            return new MessageOrderCursor(this);
3474        }
3475
3476        void reset() {
3477            this.defaultCursorPosition=0;
3478            this.highPriorityCursorPosition=0;
3479            this.lowPriorityCursorPosition=0;
3480        }
3481
3482        void increment() {
3483            if (defaultCursorPosition!=0) {
3484                defaultCursorPosition++;
3485            }
3486            if (highPriorityCursorPosition!=0) {
3487                highPriorityCursorPosition++;
3488            }
3489            if (lowPriorityCursorPosition!=0) {
3490                lowPriorityCursorPosition++;
3491            }
3492        }
3493
3494        @Override
3495        public String toString() {
3496           return "MessageOrderCursor:[def:" + defaultCursorPosition
3497                   + ", low:" + lowPriorityCursorPosition
3498                   + ", high:" +  highPriorityCursorPosition + "]";
3499        }
3500
3501        public void sync(MessageOrderCursor other) {
3502            this.defaultCursorPosition=other.defaultCursorPosition;
3503            this.lowPriorityCursorPosition=other.lowPriorityCursorPosition;
3504            this.highPriorityCursorPosition=other.highPriorityCursorPosition;
3505        }
3506    }
3507
3508    class MessageOrderIndex {
3509        static final byte HI = 9;
3510        static final byte LO = 0;
3511        static final byte DEF = 4;
3512
3513        long nextMessageId;
3514        BTreeIndex<Long, MessageKeys> defaultPriorityIndex;
3515        BTreeIndex<Long, MessageKeys> lowPriorityIndex;
3516        BTreeIndex<Long, MessageKeys> highPriorityIndex;
3517        final MessageOrderCursor cursor = new MessageOrderCursor();
3518        Long lastDefaultKey;
3519        Long lastHighKey;
3520        Long lastLowKey;
3521        byte lastGetPriority;
3522        final List<Long> pendingAdditions = new LinkedList<>();
3523        final MessageKeysMarshaller messageKeysMarshaller = new MessageKeysMarshaller();
3524
3525        MessageKeys remove(Transaction tx, Long key) throws IOException {
3526            MessageKeys result = defaultPriorityIndex.remove(tx, key);
3527            if (result == null && highPriorityIndex!=null) {
3528                result = highPriorityIndex.remove(tx, key);
3529                if (result ==null && lowPriorityIndex!=null) {
3530                    result = lowPriorityIndex.remove(tx, key);
3531                }
3532            }
3533            return result;
3534        }
3535
3536        void load(Transaction tx) throws IOException {
3537            defaultPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
3538            defaultPriorityIndex.setValueMarshaller(messageKeysMarshaller);
3539            defaultPriorityIndex.load(tx);
3540            lowPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
3541            lowPriorityIndex.setValueMarshaller(messageKeysMarshaller);
3542            lowPriorityIndex.load(tx);
3543            highPriorityIndex.setKeyMarshaller(LongMarshaller.INSTANCE);
3544            highPriorityIndex.setValueMarshaller(messageKeysMarshaller);
3545            highPriorityIndex.load(tx);
3546        }
3547
3548        void allocate(Transaction tx) throws IOException {
3549            defaultPriorityIndex = new BTreeIndex<>(pageFile, tx.allocate());
3550            if (metadata.version >= 2) {
3551                lowPriorityIndex = new BTreeIndex<>(pageFile, tx.allocate());
3552                highPriorityIndex = new BTreeIndex<>(pageFile, tx.allocate());
3553            }
3554        }
3555
3556        void configureLast(Transaction tx) throws IOException {
3557            // Figure out the next key using the last entry in the destination.
3558            TreeSet<Long> orderedSet = new TreeSet<>();
3559
3560            addLast(orderedSet, highPriorityIndex, tx);
3561            addLast(orderedSet, defaultPriorityIndex, tx);
3562            addLast(orderedSet, lowPriorityIndex, tx);
3563
3564            if (!orderedSet.isEmpty()) {
3565                nextMessageId = orderedSet.last() + 1;
3566            }
3567        }
3568
3569        private void addLast(TreeSet<Long> orderedSet, BTreeIndex<Long, MessageKeys> index, Transaction tx) throws IOException {
3570            if (index != null) {
3571                Entry<Long, MessageKeys> lastEntry = index.getLast(tx);
3572                if (lastEntry != null) {
3573                    orderedSet.add(lastEntry.getKey());
3574                }
3575            }
3576        }
3577
3578        void clear(Transaction tx) throws IOException {
3579            this.remove(tx);
3580            this.resetCursorPosition();
3581            this.allocate(tx);
3582            this.load(tx);
3583            this.configureLast(tx);
3584        }
3585
3586        void remove(Transaction tx) throws IOException {
3587            defaultPriorityIndex.clear(tx);
3588            defaultPriorityIndex.unload(tx);
3589            tx.free(defaultPriorityIndex.getPageId());
3590            if (lowPriorityIndex != null) {
3591                lowPriorityIndex.clear(tx);
3592                lowPriorityIndex.unload(tx);
3593
3594                tx.free(lowPriorityIndex.getPageId());
3595            }
3596            if (highPriorityIndex != null) {
3597                highPriorityIndex.clear(tx);
3598                highPriorityIndex.unload(tx);
3599                tx.free(highPriorityIndex.getPageId());
3600            }
3601        }
3602
3603        void resetCursorPosition() {
3604            this.cursor.reset();
3605            lastDefaultKey = null;
3606            lastHighKey = null;
3607            lastLowKey = null;
3608        }
3609
3610        void setBatch(Transaction tx, Long sequence) throws IOException {
3611            if (sequence != null) {
3612                Long nextPosition = new Long(sequence.longValue() + 1);
3613                lastDefaultKey = sequence;
3614                cursor.defaultCursorPosition = nextPosition.longValue();
3615                lastHighKey = sequence;
3616                cursor.highPriorityCursorPosition = nextPosition.longValue();
3617                lastLowKey = sequence;
3618                cursor.lowPriorityCursorPosition = nextPosition.longValue();
3619            }
3620        }
3621
3622        void setBatch(Transaction tx, LastAck last) throws IOException {
3623            setBatch(tx, last.lastAckedSequence);
3624            if (cursor.defaultCursorPosition == 0
3625                    && cursor.highPriorityCursorPosition == 0
3626                    && cursor.lowPriorityCursorPosition == 0) {
3627                long next = last.lastAckedSequence + 1;
3628                switch (last.priority) {
3629                    case DEF:
3630                        cursor.defaultCursorPosition = next;
3631                        cursor.highPriorityCursorPosition = next;
3632                        break;
3633                    case HI:
3634                        cursor.highPriorityCursorPosition = next;
3635                        break;
3636                    case LO:
3637                        cursor.lowPriorityCursorPosition = next;
3638                        cursor.defaultCursorPosition = next;
3639                        cursor.highPriorityCursorPosition = next;
3640                        break;
3641                }
3642            }
3643        }
3644
3645        void stoppedIterating() {
3646            if (lastDefaultKey!=null) {
3647                cursor.defaultCursorPosition=lastDefaultKey.longValue()+1;
3648            }
3649            if (lastHighKey!=null) {
3650                cursor.highPriorityCursorPosition=lastHighKey.longValue()+1;
3651            }
3652            if (lastLowKey!=null) {
3653                cursor.lowPriorityCursorPosition=lastLowKey.longValue()+1;
3654            }
3655            lastDefaultKey = null;
3656            lastHighKey = null;
3657            lastLowKey = null;
3658        }
3659
3660        void getDeleteList(Transaction tx, ArrayList<Entry<Long, MessageKeys>> deletes, Long sequenceId)
3661                throws IOException {
3662            if (defaultPriorityIndex.containsKey(tx, sequenceId)) {
3663                getDeleteList(tx, deletes, defaultPriorityIndex, sequenceId);
3664            } else if (highPriorityIndex != null && highPriorityIndex.containsKey(tx, sequenceId)) {
3665                getDeleteList(tx, deletes, highPriorityIndex, sequenceId);
3666            } else if (lowPriorityIndex != null && lowPriorityIndex.containsKey(tx, sequenceId)) {
3667                getDeleteList(tx, deletes, lowPriorityIndex, sequenceId);
3668            }
3669        }
3670
3671        void getDeleteList(Transaction tx, ArrayList<Entry<Long, MessageKeys>> deletes,
3672                BTreeIndex<Long, MessageKeys> index, Long sequenceId) throws IOException {
3673
3674            Iterator<Entry<Long, MessageKeys>> iterator = index.iterator(tx, sequenceId, null);
3675            deletes.add(iterator.next());
3676        }
3677
3678        long getNextMessageId() {
3679            return nextMessageId++;
3680        }
3681
3682        void revertNextMessageId() {
3683            nextMessageId--;
3684        }
3685
3686        MessageKeys get(Transaction tx, Long key) throws IOException {
3687            MessageKeys result = defaultPriorityIndex.get(tx, key);
3688            if (result == null) {
3689                result = highPriorityIndex.get(tx, key);
3690                if (result == null) {
3691                    result = lowPriorityIndex.get(tx, key);
3692                    lastGetPriority = LO;
3693                } else {
3694                    lastGetPriority = HI;
3695                }
3696            } else {
3697                lastGetPriority = DEF;
3698            }
3699            return result;
3700        }
3701
3702        MessageKeys put(Transaction tx, int priority, Long key, MessageKeys value) throws IOException {
3703            if (priority == javax.jms.Message.DEFAULT_PRIORITY) {
3704                return defaultPriorityIndex.put(tx, key, value);
3705            } else if (priority > javax.jms.Message.DEFAULT_PRIORITY) {
3706                return highPriorityIndex.put(tx, key, value);
3707            } else {
3708                return lowPriorityIndex.put(tx, key, value);
3709            }
3710        }
3711
3712        Iterator<Entry<Long, MessageKeys>> iterator(Transaction tx) throws IOException{
3713            return new MessageOrderIterator(tx,cursor,this);
3714        }
3715
3716        Iterator<Entry<Long, MessageKeys>> iterator(Transaction tx, MessageOrderCursor m) throws IOException{
3717            return new MessageOrderIterator(tx,m,this);
3718        }
3719
3720        public byte lastGetPriority() {
3721            return lastGetPriority;
3722        }
3723
3724        public boolean alreadyDispatched(Long sequence) {
3725            return (cursor.highPriorityCursorPosition > 0 && cursor.highPriorityCursorPosition >= sequence) ||
3726                    (cursor.defaultCursorPosition > 0 && cursor.defaultCursorPosition >= sequence) ||
3727                    (cursor.lowPriorityCursorPosition > 0 && cursor.lowPriorityCursorPosition >= sequence);
3728        }
3729
3730        public void trackPendingAdd(Long seq) {
3731            synchronized (pendingAdditions) {
3732                pendingAdditions.add(seq);
3733            }
3734        }
3735
3736        public void trackPendingAddComplete(Long seq) {
3737            synchronized (pendingAdditions) {
3738                pendingAdditions.remove(seq);
3739            }
3740        }
3741
3742        public Long minPendingAdd() {
3743            synchronized (pendingAdditions) {
3744                if (!pendingAdditions.isEmpty()) {
3745                    return pendingAdditions.get(0);
3746                } else {
3747                    return null;
3748                }
3749            }
3750        }
3751
3752        class MessageOrderIterator implements Iterator<Entry<Long, MessageKeys>>{
3753            Iterator<Entry<Long, MessageKeys>>currentIterator;
3754            final Iterator<Entry<Long, MessageKeys>>highIterator;
3755            final Iterator<Entry<Long, MessageKeys>>defaultIterator;
3756            final Iterator<Entry<Long, MessageKeys>>lowIterator;
3757
3758            MessageOrderIterator(Transaction tx, MessageOrderCursor m, MessageOrderIndex messageOrderIndex) throws IOException {
3759                Long pendingAddLimiter = messageOrderIndex.minPendingAdd();
3760                this.defaultIterator = defaultPriorityIndex.iterator(tx, m.defaultCursorPosition, pendingAddLimiter);
3761                if (highPriorityIndex != null) {
3762                    this.highIterator = highPriorityIndex.iterator(tx, m.highPriorityCursorPosition, pendingAddLimiter);
3763                } else {
3764                    this.highIterator = null;
3765                }
3766                if (lowPriorityIndex != null) {
3767                    this.lowIterator = lowPriorityIndex.iterator(tx, m.lowPriorityCursorPosition, pendingAddLimiter);
3768                } else {
3769                    this.lowIterator = null;
3770                }
3771            }
3772
3773            @Override
3774            public boolean hasNext() {
3775                if (currentIterator == null) {
3776                    if (highIterator != null) {
3777                        if (highIterator.hasNext()) {
3778                            currentIterator = highIterator;
3779                            return currentIterator.hasNext();
3780                        }
3781                        if (defaultIterator.hasNext()) {
3782                            currentIterator = defaultIterator;
3783                            return currentIterator.hasNext();
3784                        }
3785                        if (lowIterator.hasNext()) {
3786                            currentIterator = lowIterator;
3787                            return currentIterator.hasNext();
3788                        }
3789                        return false;
3790                    } else {
3791                        currentIterator = defaultIterator;
3792                        return currentIterator.hasNext();
3793                    }
3794                }
3795                if (highIterator != null) {
3796                    if (currentIterator.hasNext()) {
3797                        return true;
3798                    }
3799                    if (currentIterator == highIterator) {
3800                        if (defaultIterator.hasNext()) {
3801                            currentIterator = defaultIterator;
3802                            return currentIterator.hasNext();
3803                        }
3804                        if (lowIterator.hasNext()) {
3805                            currentIterator = lowIterator;
3806                            return currentIterator.hasNext();
3807                        }
3808                        return false;
3809                    }
3810
3811                    if (currentIterator == defaultIterator) {
3812                        if (lowIterator.hasNext()) {
3813                            currentIterator = lowIterator;
3814                            return currentIterator.hasNext();
3815                        }
3816                        return false;
3817                    }
3818                }
3819                return currentIterator.hasNext();
3820            }
3821
3822            @Override
3823            public Entry<Long, MessageKeys> next() {
3824                Entry<Long, MessageKeys> result = currentIterator.next();
3825                if (result != null) {
3826                    Long key = result.getKey();
3827                    if (highIterator != null) {
3828                        if (currentIterator == defaultIterator) {
3829                            lastDefaultKey = key;
3830                        } else if (currentIterator == highIterator) {
3831                            lastHighKey = key;
3832                        } else {
3833                            lastLowKey = key;
3834                        }
3835                    } else {
3836                        lastDefaultKey = key;
3837                    }
3838                }
3839                return result;
3840            }
3841
3842            @Override
3843            public void remove() {
3844                throw new UnsupportedOperationException();
3845            }
3846        }
3847    }
3848
3849    private static class HashSetStringMarshaller extends VariableMarshaller<HashSet<String>> {
3850        final static HashSetStringMarshaller INSTANCE = new HashSetStringMarshaller();
3851
3852        @Override
3853        public void writePayload(HashSet<String> object, DataOutput dataOut) throws IOException {
3854            ByteArrayOutputStream baos = new ByteArrayOutputStream();
3855            ObjectOutputStream oout = new ObjectOutputStream(baos);
3856            oout.writeObject(object);
3857            oout.flush();
3858            oout.close();
3859            byte[] data = baos.toByteArray();
3860            dataOut.writeInt(data.length);
3861            dataOut.write(data);
3862        }
3863
3864        @Override
3865        @SuppressWarnings("unchecked")
3866        public HashSet<String> readPayload(DataInput dataIn) throws IOException {
3867            int dataLen = dataIn.readInt();
3868            byte[] data = new byte[dataLen];
3869            dataIn.readFully(data);
3870            ByteArrayInputStream bais = new ByteArrayInputStream(data);
3871            ObjectInputStream oin = new ObjectInputStream(bais);
3872            try {
3873                return (HashSet<String>) oin.readObject();
3874            } catch (ClassNotFoundException cfe) {
3875                IOException ioe = new IOException("Failed to read HashSet<String>: " + cfe);
3876                ioe.initCause(cfe);
3877                throw ioe;
3878            }
3879        }
3880    }
3881
3882    public File getIndexDirectory() {
3883        return indexDirectory;
3884    }
3885
3886    public void setIndexDirectory(File indexDirectory) {
3887        this.indexDirectory = indexDirectory;
3888    }
3889
3890    interface IndexAware {
3891        public void sequenceAssignedWithIndexLocked(long index);
3892    }
3893
3894    public String getPreallocationScope() {
3895        return preallocationScope;
3896    }
3897
3898    public void setPreallocationScope(String preallocationScope) {
3899        this.preallocationScope = preallocationScope;
3900    }
3901
3902    public String getPreallocationStrategy() {
3903        return preallocationStrategy;
3904    }
3905
3906    public void setPreallocationStrategy(String preallocationStrategy) {
3907        this.preallocationStrategy = preallocationStrategy;
3908    }
3909
3910    public int getCompactAcksAfterNoGC() {
3911        return compactAcksAfterNoGC;
3912    }
3913
3914    /**
3915     * Sets the number of GC cycles where no journal logs were removed before an attempt to
3916     * move forward all the acks in the last log that contains them and is otherwise unreferenced.
3917     * <p>
3918     * A value of -1 will disable this feature.
3919     *
3920     * @param compactAcksAfterNoGC
3921     *      Number of empty GC cycles before we rewrite old ACKS.
3922     */
3923    public void setCompactAcksAfterNoGC(int compactAcksAfterNoGC) {
3924        this.compactAcksAfterNoGC = compactAcksAfterNoGC;
3925    }
3926
3927    /**
3928     * Returns whether Ack compaction will ignore that the store is still growing
3929     * and run more often.
3930     *
3931     * @return the compactAcksIgnoresStoreGrowth current value.
3932     */
3933    public boolean isCompactAcksIgnoresStoreGrowth() {
3934        return compactAcksIgnoresStoreGrowth;
3935    }
3936
3937    /**
3938     * Configure if Ack compaction will occur regardless of continued growth of the
3939     * journal logs meaning that the store has not run out of space yet.  Because the
3940     * compaction operation can be costly this value is defaulted to off and the Ack
3941     * compaction is only done when it seems that the store cannot grow and larger.
3942     *
3943     * @param compactAcksIgnoresStoreGrowth the compactAcksIgnoresStoreGrowth to set
3944     */
3945    public void setCompactAcksIgnoresStoreGrowth(boolean compactAcksIgnoresStoreGrowth) {
3946        this.compactAcksIgnoresStoreGrowth = compactAcksIgnoresStoreGrowth;
3947    }
3948
3949    /**
3950     * Returns whether Ack compaction is enabled
3951     *
3952     * @return enableAckCompaction
3953     */
3954    public boolean isEnableAckCompaction() {
3955        return enableAckCompaction;
3956    }
3957
3958    /**
3959     * Configure if the Ack compaction task should be enabled to run
3960     *
3961     * @param enableAckCompaction
3962     */
3963    public void setEnableAckCompaction(boolean enableAckCompaction) {
3964        this.enableAckCompaction = enableAckCompaction;
3965    }
3966
3967    /**
3968     * @return
3969     */
3970    public boolean isEnableSubscriptionStatistics() {
3971        return enableSubscriptionStatistics;
3972    }
3973
3974    /**
3975     * Enable caching statistics for each subscription to allow non-blocking
3976     * retrieval of metrics.  This could incur some overhead to compute if there are a lot
3977     * of subscriptions.
3978     *
3979     * @param enableSubscriptionStatistics
3980     */
3981    public void setEnableSubscriptionStatistics(boolean enableSubscriptionStatistics) {
3982        this.enableSubscriptionStatistics = enableSubscriptionStatistics;
3983    }
3984}