Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -900,7 +900,10 @@ private long increase(long lastUsage, long usage, long lastTime, long now, long
long averageUsage = divideCeil(usage * precision, windowSize);

if (lastTime != now) {
assert now > lastTime;
if (now < lastTime) {
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It still needs to be confirmed that this branch could never have been reached historically (i.e. that no past block processing ever hit now < lastTime), since turning the assert into a thrown exception would make such a case consensus-breaking.

throw new IllegalArgumentException(
"Time went backwards: now=" + now + ", lastTime=" + lastTime);
}
if (lastTime + windowSize > now) {
long delta = now - lastTime;
double decay = (windowSize - delta) / (double) windowSize;
Expand Down Expand Up @@ -930,7 +933,10 @@ public long calculateGlobalEnergyLimit(AccountCapsule accountCapsule) {
long totalEnergyLimit = getDynamicPropertiesStore().getTotalEnergyCurrentLimit();
long totalEnergyWeight = getDynamicPropertiesStore().getTotalEnergyWeight();

assert totalEnergyWeight > 0;
if (totalEnergyWeight <= 0) {
throw new IllegalStateException(
"totalEnergyWeight must be positive, got: " + totalEnergyWeight);
}

return (long) (energyWeight * ((double) totalEnergyLimit / totalEnergyWeight));
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,10 @@ private static long convertVectorToLong(List<Boolean> v) throws ZksnarkException
}

public byte[] encode() throws ZksnarkException {
assert (authenticationPath.size() == index.size());
if (authenticationPath.size() != index.size()) {
throw new ZksnarkException("authenticationPath and index size mismatch: "
+ authenticationPath.size() + " != " + index.size());
}
List<List<Byte>> pathByteList = Lists.newArrayList();
long indexLong; // 64
for (int i = 0; i < authenticationPath.size(); i++) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,10 @@ public long calculateGlobalEnergyLimit(AccountCapsule accountCapsule) {
if (dynamicPropertiesStore.allowNewReward() && totalEnergyWeight <= 0) {
return 0;
} else {
assert totalEnergyWeight > 0;
if (totalEnergyWeight <= 0) {
throw new IllegalStateException(
"totalEnergyWeight must be positive, got: " + totalEnergyWeight);
}
}
return (long) (energyWeight * ((double) totalEnergyLimit / totalEnergyWeight));
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,10 @@ protected long increase(long lastUsage, long usage, long lastTime, long now, lon
long averageUsage = divideCeil(usage * precision, windowSize);

if (lastTime != now) {
assert now > lastTime;
if (now < lastTime) {
throw new IllegalArgumentException(
"Time went backwards: now=" + now + ", lastTime=" + lastTime);
}
if (lastTime + windowSize > now) {
long delta = now - lastTime;
double decay = (windowSize - delta) / (double) windowSize;
Expand Down
10 changes: 10 additions & 0 deletions chainbase/src/main/java/org/tron/core/db/TransactionTrace.java
Original file line number Diff line number Diff line change
Expand Up @@ -300,6 +300,11 @@ private void resetAccountUsage(AccountCapsule accountCap,
- (mergedUsage * mergedSize - usage * size);
// If area merging happened during suicide, use the current window size
long newSize = mergedSize == currentSize ? size : currentSize;
if (newSize == 0) {
accountCap.setEnergyUsage(0);
accountCap.setNewWindowSize(ENERGY, 0L);
return;
}
// Calc new usage by fixed x-axes
long newUsage = max(0, newArea / newSize, dynamicPropertiesStore.disableJavaLangMath());
// Reset account usage and window size
Expand All @@ -317,6 +322,11 @@ private void resetAccountUsageV2(AccountCapsule accountCap,
// If area merging happened during suicide, use the current window size
long newSize = mergedSize == currentSize ? size : currentSize;
long newSize2 = mergedSize == currentSize ? size2 : currentSize2;
if (newSize == 0) {
accountCap.setEnergyUsage(0);
accountCap.setNewWindowSizeV2(ENERGY, 0L);
return;
}
// Calc new usage by fixed x-axes
long newUsage = max(0, newArea / newSize, dynamicPropertiesStore.disableJavaLangMath());
// Reset account usage and window size
Expand Down
9 changes: 6 additions & 3 deletions chainbase/src/main/java/org/tron/core/db/TronDatabase.java
Original file line number Diff line number Diff line change
Expand Up @@ -78,12 +78,15 @@ public void close() {
logger.info("******** Begin to close {}. ********", getName());
try {
writeOptions.close();
} catch (Exception e) {
logger.warn("Failed to close writeOptions in {}.", getName(), e);
}
try {
dbSource.closeDB();
} catch (Exception e) {
logger.warn("Failed to close {}.", getName(), e);
} finally {
logger.info("******** End to close {}. ********", getName());
logger.warn("Failed to close dbSource in {}.", getName(), e);
}
logger.info("******** End to close {}. ********", getName());
}

public abstract void put(byte[] key, T item);
Expand Down
14 changes: 12 additions & 2 deletions chainbase/src/main/java/org/tron/core/db2/common/LevelDB.java
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,13 @@
import java.util.HashMap;
import java.util.Map;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.tron.common.parameter.CommonParameter;
import org.tron.common.storage.WriteOptionsWrapper;
import org.tron.common.storage.leveldb.LevelDbDataSourceImpl;
import org.tron.core.db.common.iterator.DBIterator;

@Slf4j(topic = "DB")
public class LevelDB implements DB<byte[], byte[]>, Flusher {

@Getter
Expand Down Expand Up @@ -65,8 +67,16 @@ public void flush(Map<WrappedByteArray, WrappedByteArray> batch) {

@Override
public void close() {
this.writeOptions.close();
db.closeDB();
try {
this.writeOptions.close();
} catch (Exception e) {
logger.warn("Failed to close writeOptions.", e);
}
try {
db.closeDB();
} catch (Exception e) {
logger.warn("Failed to close db.", e);
}
}

@Override
Expand Down
14 changes: 12 additions & 2 deletions chainbase/src/main/java/org/tron/core/db2/common/RocksDB.java
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,13 @@
import java.util.HashMap;
import java.util.Map;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.tron.common.parameter.CommonParameter;
import org.tron.common.storage.WriteOptionsWrapper;
import org.tron.common.storage.rocksdb.RocksDbDataSourceImpl;
import org.tron.core.db.common.iterator.DBIterator;

@Slf4j(topic = "DB")
public class RocksDB implements DB<byte[], byte[]>, Flusher {

@Getter
Expand Down Expand Up @@ -66,8 +68,16 @@ public void flush(Map<WrappedByteArray, WrappedByteArray> batch) {

@Override
public void close() {
writeOptions.close();
db.closeDB();
try {
writeOptions.close();
} catch (Exception e) {
logger.warn("Failed to close writeOptions.", e);
}
try {
db.closeDB();
} catch (Exception e) {
logger.warn("Failed to close db.", e);
}
}

@Override
Expand Down
26 changes: 15 additions & 11 deletions chainbase/src/main/java/org/tron/core/db2/core/Chainbase.java
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ public enum Cursor {
//true:fullnode, false:soliditynode
private ThreadLocal<Cursor> cursor = new ThreadLocal<>();
private ThreadLocal<Long> offset = new ThreadLocal<>();
private Snapshot head;
private volatile Snapshot head;

public Chainbase(Snapshot head) {
this.head = head;
Expand Down Expand Up @@ -349,28 +349,32 @@ private Map<byte[], byte[]> getNext(Snapshot head, byte[] key, long limit) {
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}

// Snapshot head once: prefixQueryRoot and prefixQuerySnapshot both need the same
// head reference. Other read methods (get, getUnchecked, has) only read head once
// via head(), so volatile alone is sufficient for them.
public Map<WrappedByteArray, byte[]> prefixQuery(byte[] key) {
Map<WrappedByteArray, byte[]> result = prefixQueryRoot(key);
Map<WrappedByteArray, byte[]> snapshot = prefixQuerySnapshot(key);
Snapshot localHead = head;
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

prefixQuery() previously depended on cursor-resolved snapshot selection through head(), which supports HEAD, SOLIDITY, and PBFT. The current implementation now bypasses that logic by reading the raw head field directly. As a result, prefixQuery() can diverge from get()/getUnchecked()/has() under non-HEAD cursors.

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Valid issue. The original code used head() in prefixQuerySnapshot which routes through cursor logic (HEAD/SOLIDITY/PBFT), but this PR replaced it with the raw head field, bypassing cursor resolution entirely. Under SOLIDITY or PBFT cursors, prefixQuery would now return unconfirmed head-level data that get()/getUnchecked()/has() would not see.

Fix: change Snapshot localHead = head to Snapshot localHead = head(). This preserves both the cursor semantics and the single-read consistency that the local snapshot was intended to provide.

Map<WrappedByteArray, byte[]> result = prefixQueryRoot(localHead, key);
Map<WrappedByteArray, byte[]> snapshot = prefixQuerySnapshot(localHead, key);
result.putAll(snapshot);
result.entrySet().removeIf(e -> e.getValue() == null);
return result;
}

private Map<WrappedByteArray, byte[]> prefixQueryRoot(byte[] key) {
private Map<WrappedByteArray, byte[]> prefixQueryRoot(Snapshot localHead, byte[] key) {
Map<WrappedByteArray, byte[]> result = new HashMap<>();
if (((SnapshotRoot) head.getRoot()).db.getClass() == LevelDB.class) {
result = ((LevelDB) ((SnapshotRoot) head.getRoot()).db).getDb().prefixQuery(key);
} else if (((SnapshotRoot) head.getRoot()).db.getClass() == RocksDB.class) {
result = ((RocksDB) ((SnapshotRoot) head.getRoot()).db).getDb().prefixQuery(key);
if (((SnapshotRoot) localHead.getRoot()).db.getClass() == LevelDB.class) {
result = ((LevelDB) ((SnapshotRoot) localHead.getRoot()).db).getDb().prefixQuery(key);
} else if (((SnapshotRoot) localHead.getRoot()).db.getClass() == RocksDB.class) {
result = ((RocksDB) ((SnapshotRoot) localHead.getRoot()).db).getDb().prefixQuery(key);
}
return result;
}

private Map<WrappedByteArray, byte[]> prefixQuerySnapshot(byte[] key) {
private Map<WrappedByteArray, byte[]> prefixQuerySnapshot(Snapshot localHead, byte[] key) {
Map<WrappedByteArray, byte[]> result = new HashMap<>();
Snapshot snapshot = head();
if (!snapshot.equals(head.getRoot())) {
Snapshot snapshot = localHead;
if (!snapshot.equals(localHead.getRoot())) {
Map<WrappedByteArray, WrappedByteArray> all = new HashMap<>();
((SnapshotImpl) snapshot).collect(all, key);
all.forEach((k, v) -> result.put(k, v.getBytes()));
Comment on lines +376 to 380
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Although it appears to bind the query to the caller-selected view via Snapshot snapshot = localHead;, the subsequent call to ((SnapshotImpl) snapshot).collect(all, key) does not actually collect from localHead. Because SnapshotImpl.collect(...) starts from getRoot().getNext() and walks forward to the latest tip, the effective scan range is the entire overlay chain after root, not the caller's resolved snapshot.

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good catch. You're right that collect() always walks from getRoot().getNext() to the end of the chain, regardless of which SnapshotImpl instance it's called on. So even with the local snapshot, if head advances during prefixQuery, collect() will scan into the newly appended snapshots.

This can be fixed by adding a boundary parameter to collect():

// SnapshotImpl.java
synchronized void collect(Map<WrappedByteArray, WrappedByteArray> all,
                          byte[] prefix, Snapshot boundary) {
    Snapshot next = getRoot().getNext();
    while (next != null) {
        Streams.stream(((SnapshotImpl) next).db)
            .filter(e -> Bytes.indexOf(
                Objects.requireNonNull(e.getKey().getBytes()), prefix) == 0)
            .forEach(e -> all.put(WrappedByteArray.of(e.getKey().getBytes()),
                WrappedByteArray.of(e.getValue().getBytes())));
        if (next == boundary) {
            break;
        }
        next = next.getNext();
    }
}

Then prefixQuerySnapshot passes localHead as the boundary:

((SnapshotImpl) localHead).collect(all, key, localHead);

That said, two notes:

  1. This is a pre-existing issue, not introduced by this PR. The original code read head directly and collect() walked to the end in exactly the same way. The local snapshot fix in this PR still provides value — it ensures prefixQueryRoot and prefixQuerySnapshot use the same getRoot(), which was the original read-read inconsistency.

  2. Other callers have the same issue. Chainbase.getAll() (line 179) and getNext() (line 325) also call collect() without a boundary. If this is worth fixing, all call sites should be addressed together.

Recommend tracking this as a separate follow-up issue rather than expanding the scope of this PR.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -511,9 +511,9 @@ private void checkV2() {
if (latestTimestamp - timestamp > ONE_MINUTE_MILLS*2) {
continue;
}
TronDatabase<byte[]> checkPointV2Store = getCheckpointDB(cp);
recover(checkPointV2Store);
checkPointV2Store.close();
try (TronDatabase<byte[]> checkPointV2Store = getCheckpointDB(cp)) {
recover(checkPointV2Store);
}
}
logger.info("checkpoint v2 recover success");
unChecked = false;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -63,19 +63,19 @@ public void updateByBatch(Map<byte[], byte[]> rows) {
}

/**
* close the database.
* Closes the database and releases all resources.
* Note: this class and the parent TronDatabase each declare their own
* writeOptions field (Java fields are not polymorphic). Both must be
* closed: this.writeOptions here, and the parent's via super.close().
*/
@Override
public void close() {
logger.debug("******** Begin to close {}. ********", getName());
try {
writeOptions.close();
dbSource.closeDB();
} catch (Exception e) {
logger.warn("Failed to close {}.", getName(), e);
} finally {
logger.debug("******** End to close {}. ********", getName());
logger.warn("Failed to close writeOptions in {}.", getName(), e);
}
super.close();
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,8 @@
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Random;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
Expand Down Expand Up @@ -114,7 +114,7 @@ private void disconnectRandom() {
.collect(Collectors.toList());
}
if (!peers.isEmpty()) {
int index = new Random().nextInt(peers.size());
int index = ThreadLocalRandom.current().nextInt(peers.size());
disconnectFromPeer(peers.get(index), ReasonCode.RANDOM_ELIMINATION,
DisconnectCause.RANDOM_ELIMINATION);
}
Expand Down Expand Up @@ -236,19 +236,21 @@ private enum DisconnectCause {
static class WeightedRandom {

private final Map<Object, Integer> weights;
private final Random random;

public WeightedRandom(Map<Object, Integer> weights) {
this.weights = weights;
this.random = new Random();
}

public Object next() {
int totalWeight = 0;
for (int weight : weights.values()) {
totalWeight += weight;
}
int randomNum = random.nextInt(totalWeight);
if (totalWeight <= 0) {
throw new IllegalStateException(
"Total weight must be positive, got: " + totalWeight);
}
int randomNum = ThreadLocalRandom.current().nextInt(totalWeight);
for (Object key : weights.keySet()) {
randomNum -= weights.get(key);
if (randomNum < 0) {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
package org.tron.common.zksnark;

import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThrows;

import com.google.common.collect.Lists;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.junit.Test;
import org.tron.core.exception.ZksnarkException;

public class MerklePathEncodeTest {

  /**
   * encode() must reject a MerklePath whose authenticationPath and index
   * lists have different sizes by throwing ZksnarkException (the guard
   * that replaced the former assert in MerklePath.encode()).
   */
  @Test
  public void testEncode_sizeMismatch_throwsZksnarkException() {
    List<List<Boolean>> authPath = Arrays.asList(
        Arrays.asList(true, false),
        Arrays.asList(false, true)
    );
    List<Boolean> index = Collections.singletonList(true); // size 1, authPath size 2

    MerklePath merklePath = new MerklePath(authPath, index);

    assertThrows(ZksnarkException.class, merklePath::encode);
  }

  /**
   * Empty-path edge case: sizes match (0 == 0), so encode() must not throw.
   * Previously this test only verified "no exception"; it now also asserts
   * that encode() actually returns a result rather than null.
   */
  @Test
  public void testEncode_emptyLists_noException() throws ZksnarkException {
    MerklePath merklePath = new MerklePath(Lists.newArrayList(), Lists.newArrayList());
    byte[] encoded = merklePath.encode();
    assertNotNull(encoded);
  }
}
Loading
Loading