- networkInterfaces = NetworkInterface.getNetworkInterfaces();
- while (networkInterfaces.hasMoreElements()) {
- NetworkInterface networkInterface = networkInterfaces.nextElement();
- byte[] mac = networkInterface.getHardwareAddress();
- if (mac != null) {
- for(byte macPort: mac) {
- sb.append(String.format("%02X", macPort));
- }
- }
- }
- nodeId = sb.toString().hashCode();
- } catch (Exception ex) {
- nodeId = (new SecureRandom().nextInt());
- }
- nodeId = nodeId & maxNodeId;
- return nodeId;
- }
-
- public long[] parse(long id) {
- long maskNodeId = ((1L << NODE_ID_BITS) - 1) << SEQUENCE_BITS;
- long maskSequence = (1L << SEQUENCE_BITS) - 1;
-
- long timestamp = (id >> (NODE_ID_BITS + SEQUENCE_BITS)) + customEpoch;
- long nodeId = (id & maskNodeId) >> SEQUENCE_BITS;
- long sequence = id & maskSequence;
-
- return new long[]{timestamp, nodeId, sequence};
- }
-
- @Override
- public String toString() {
- return "Snowflake Settings [EPOCH_BITS=" + EPOCH_BITS + ", NODE_ID_BITS=" + NODE_ID_BITS
- + ", SEQUENCE_BITS=" + SEQUENCE_BITS + ", CUSTOM_EPOCH=" + customEpoch
- + ", NodeId=" + nodeId + "]";
- }
-}
\ No newline at end of file
diff --git a/src/main/java/io/github/arompr/snowflake/SnowflakeIdGenerator.java b/src/main/java/io/github/arompr/snowflake/SnowflakeIdGenerator.java
new file mode 100644
index 0000000..3f25433
--- /dev/null
+++ b/src/main/java/io/github/arompr/snowflake/SnowflakeIdGenerator.java
@@ -0,0 +1,171 @@
+package io.github.arompr.snowflake;
+
+import java.time.Instant;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Distributed Sequence Generator.
+ * Inspired by Twitter snowflake:
+ * https://github.com/twitter/snowflake/tree/snowflake-2010
+ *
+ * This class should be used as a singleton.
+ * Make sure that you create and reuse a single instance of SnowflakeIdGenerator
+ * per node in your distributed system cluster.
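+ *
+ * A minimal usage sketch (the node ID {@code 1} below is an arbitrary example value):
+ * <pre>{@code
+ * SnowflakeIdGenerator generator = new SnowflakeIdGenerator(1);
+ * long id = generator.nextId();
+ * long[] parts = generator.parse(id); // {timestamp, nodeId, sequence}
+ * }</pre>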
+ */
+public class SnowflakeIdGenerator {
+
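+ // 64-bit ID layout: 1 unused sign bit | 41 timestamp bits | 10 node-ID bits | 12 sequence bits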
+ private static final int UNUSED_BITS = 1; // Sign bit, unused
+ private static final int EPOCH_BITS = 41;
+ private static final int NODE_ID_BITS = 10;
+ private static final int SEQUENCE_BITS = 12;
+
+ private static final long maxNodeId = (1L << NODE_ID_BITS) - 1;
+ private static final long maxSequence = (1L << SEQUENCE_BITS) - 1;
+
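+ // Default custom epoch: 1 Jan 2015 00:00:00 UTC, in milliseconds since the Unix epoch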
+ private static final long DEFAULT_CUSTOM_EPOCH = 1420070400000L;
+
+ private final long nodeId;
+ private final long epoch;
+
+ private final AtomicLong lastTimestamp = new AtomicLong(-1L);
+ private final AtomicLong sequence = new AtomicLong(0L);
+
+ /**
+ * Creates a new Snowflake ID generator instance with a specified node ID and
+ * custom epoch.
+ *
+ * <p>
+ * The node ID uniquely identifies this generator in a distributed system. It
+ * must be between 0 and {@code maxNodeId} (inclusive), otherwise an
+ * {@link IllegalArgumentException} is thrown. The custom epoch is used as the
+ * reference timestamp for generating IDs.
+ *
+ * @param nodeId the unique identifier for this node (0..maxNodeId)
+ * @param customEpoch the custom epoch (in milliseconds) to use as the reference
+ * timestamp
+ * @throws IllegalArgumentException if {@code nodeId} is out of range
+ */
+ public SnowflakeIdGenerator(long nodeId, long customEpoch) {
+ if (nodeId < 0 || nodeId > maxNodeId) {
+ throw new IllegalArgumentException(String.format("NodeId must be between %d and %d", 0, maxNodeId));
+ }
+ this.nodeId = nodeId;
+ this.epoch = customEpoch;
+ }
+
+ /**
+ * Creates a new Snowflake ID generator instance with a specified node ID.
+ *
+ * <p>
+ * The generator will use the default custom epoch {@code DEFAULT_CUSTOM_EPOCH}
+ * as the reference timestamp. The node ID uniquely identifies this generator in
+ * a distributed system and must be between 0 and {@code maxNodeId} (inclusive).
+ *
+ * @param nodeId the unique identifier for this node (0..maxNodeId)
+ * @throws IllegalArgumentException if {@code nodeId} is out of range
+ */
+ public SnowflakeIdGenerator(long nodeId) {
+ this(nodeId, DEFAULT_CUSTOM_EPOCH);
+ }
+
+ /**
+ * Generates a new unique 64-bit Snowflake ID.
+ *
+ * <p>
+ * This method is thread-safe and can be called concurrently by multiple threads
+ * from the same instance. It handles the following cases:
+ * <ul>
+ * <li>Ensures IDs are monotonically increasing even if the system clock moves
+ * backward.</li>
+ * <li>Resets the sequence number at the start of a new millisecond.</li>
+ * <li>Waits for the next millisecond if the sequence overflows within the same
+ * millisecond.</li>
+ * </ul>
+ *
+ * @return a unique 64-bit ID composed of timestamp, node ID, and sequence
+ * number.
+ */
+ public long nextId() {
+ for (;;) {
+ long currentTimestamp = timestamp();
+ long previousTimestamp = lastTimestamp.get();
+
+ currentTimestamp = handleClockRollBack(currentTimestamp, previousTimestamp);
+
+ if (currentTimestamp == previousTimestamp) {
+ long sequenceForCurrentMillisecond = incrementSequence();
+
+ // Sequence overflow, wait for next millisecond
+ if (sequenceForCurrentMillisecond == 0) {
+ currentTimestamp = waitUntilNextMillis(previousTimestamp);
+ }
+
+ if (lastTimestamp.compareAndSet(previousTimestamp, currentTimestamp)) {
+ return generateId(currentTimestamp, sequenceForCurrentMillisecond);
+ }
+ } else {
+ // New millisecond, reset sequence
+ if (isNewMillisecond(currentTimestamp, previousTimestamp)) {
+ return generateId(currentTimestamp, 0L);
+ }
+ }
+ }
+ }
+
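+ // Atomically resets the sequence and advances lastTimestamp for the new millisecond;
+ // returns false so the caller retries if another thread won the race.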
+ private boolean isNewMillisecond(long currentTimestamp, long previousTimestamp) {
+ return sequence.compareAndSet(sequence.get(), 0L) &&
+ lastTimestamp.compareAndSet(previousTimestamp, currentTimestamp);
+ }
+
+ private long incrementSequence() {
+ return (sequence.incrementAndGet()) & maxSequence;
+ }
+
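+ // Guards against the system clock moving backwards: busy-waits until the clock
+ // passes the last recorded timestamp before generating further IDs.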
+ private long handleClockRollBack(long currentTimestamp, long previousTimestamp) {
+ if (currentTimestamp < previousTimestamp) {
+ currentTimestamp = waitUntilNextMillis(previousTimestamp);
+ }
+
+ return currentTimestamp;
+ }
+
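+ // Packs the ID: timestamp in the high bits, then the node ID, then the sequence.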
+ private long generateId(long timestamp, long seq) {
+ return (timestamp << (NODE_ID_BITS + SEQUENCE_BITS))
+ | (nodeId << SEQUENCE_BITS)
+ | seq;
+ }
+
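+ // Milliseconds elapsed since the configured custom epoch.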
+ private long timestamp() {
+ return Instant.now().toEpochMilli() - epoch;
+ }
+
+ private long waitUntilNextMillis(long lastTs) {
+ long ts = timestamp();
+ while (ts <= lastTs) {
+ ts = timestamp();
+ }
+ return ts;
+ }
+
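+ /**
+ * Decomposes an ID generated by this class back into its components.
+ *
+ * @param id a previously generated Snowflake ID
+ * @return a long array of {timestamp (ms since the Unix epoch), nodeId, sequence}
+ */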
+ public long[] parse(long id) {
+ long maskNodeId = ((1L << NODE_ID_BITS) - 1) << SEQUENCE_BITS;
+ long maskSequence = (1L << SEQUENCE_BITS) - 1;
+
+ long timestamp = (id >> (NODE_ID_BITS + SEQUENCE_BITS)) + epoch;
+ long nodeId = (id & maskNodeId) >> SEQUENCE_BITS;
+ long sequence = id & maskSequence;
+
+ return new long[] { timestamp, nodeId, sequence };
+ }
+
+ @Override
+ public String toString() {
+ return "Snowflake Settings [EPOCH_BITS=" + EPOCH_BITS
+ + ", NODE_ID_BITS=" + NODE_ID_BITS
+ + ", SEQUENCE_BITS=" + SEQUENCE_BITS
+ + ", CUSTOM_EPOCH=" + epoch
+ + ", NodeId=" + nodeId + "]";
+ }
+}
diff --git a/src/test/java/io/github/arompr/snowflake/BenchmarkRunner.java b/src/test/java/io/github/arompr/snowflake/BenchmarkRunner.java
new file mode 100644
index 0000000..efc2126
--- /dev/null
+++ b/src/test/java/io/github/arompr/snowflake/BenchmarkRunner.java
@@ -0,0 +1,7 @@
+package io.github.arompr.snowflake;
+
+public class BenchmarkRunner {
+ public static void main(String[] args) throws Exception {
+ org.openjdk.jmh.Main.main(args);
+ }
+}
diff --git a/src/test/java/io/github/arompr/snowflake/SnowflakeBenchmark.java b/src/test/java/io/github/arompr/snowflake/SnowflakeBenchmark.java
new file mode 100644
index 0000000..cd7a85a
--- /dev/null
+++ b/src/test/java/io/github/arompr/snowflake/SnowflakeBenchmark.java
@@ -0,0 +1,39 @@
+package io.github.arompr.snowflake;
+
+import org.openjdk.jmh.annotations.*;
+
+import java.util.concurrent.TimeUnit;
+
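+// Measures raw nextId() throughput (operations per millisecond) at 1, 8, and 32 threads.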
+@BenchmarkMode(Mode.Throughput)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Warmup(iterations = 5, time = 1)
+@Measurement(iterations = 5, time = 1)
+@Fork(2)
+@State(Scope.Benchmark)
+public class SnowflakeBenchmark {
+
+ private SnowflakeIdGenerator generator;
+
+ @Setup
+ public void setup() {
+ generator = new SnowflakeIdGenerator(1);
+ }
+
+ @Benchmark
+ @Threads(1)
+ public long singleThread() {
+ return generator.nextId();
+ }
+
+ @Benchmark
+ @Threads(8)
+ public long eightThreads() {
+ return generator.nextId();
+ }
+
+ @Benchmark
+ @Threads(32)
+ public long thirtyTwoThreads() {
+ return generator.nextId();
+ }
+}
diff --git a/src/test/java/com/callicoder/snowflake/SnowflakePerformanceTest.java b/src/test/java/io/github/arompr/snowflake/SnowflakePerformanceTest.java
similarity index 79%
rename from src/test/java/com/callicoder/snowflake/SnowflakePerformanceTest.java
rename to src/test/java/io/github/arompr/snowflake/SnowflakePerformanceTest.java
index 36f4642..d155c93 100644
--- a/src/test/java/com/callicoder/snowflake/SnowflakePerformanceTest.java
+++ b/src/test/java/io/github/arompr/snowflake/SnowflakePerformanceTest.java
@@ -1,19 +1,16 @@
-package com.callicoder.snowflake;
+package io.github.arompr.snowflake;
import org.junit.jupiter.api.Test;
-import java.time.Instant;
import java.util.concurrent.*;
-import static org.junit.jupiter.api.Assertions.*;
-
public class SnowflakePerformanceTest {
@Test
public void nextId_withSingleThread() {
int iterations = 1000000; // 1 million
- Snowflake snowflake = new Snowflake(897);
+ SnowflakeIdGenerator snowflake = new SnowflakeIdGenerator(897);
long beginTimestamp = System.currentTimeMillis();
for (int i = 0; i < iterations; i++) {
snowflake.nextId();
@@ -21,7 +18,7 @@ public void nextId_withSingleThread() {
long endTimestamp = System.currentTimeMillis();
long cost = (endTimestamp - beginTimestamp);
- long costMs = iterations/cost;
+ long costMs = iterations / cost;
System.out.println("Single Thread:: IDs per ms: " + costMs);
}
@@ -33,10 +30,10 @@ public void nextId_withMultipleThreads() throws InterruptedException {
ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
CountDownLatch latch = new CountDownLatch(numThreads);
- Snowflake snowflake = new Snowflake(897);
+ SnowflakeIdGenerator snowflake = new SnowflakeIdGenerator(897);
long beginTimestamp = System.currentTimeMillis();
- for(int i = 0; i < iterations; i++) {
+ for (int i = 0; i < iterations; i++) {
executorService.submit(() -> {
snowflake.nextId();
latch.countDown();
@@ -46,7 +43,7 @@ public void nextId_withMultipleThreads() throws InterruptedException {
latch.await();
long endTimestamp = System.currentTimeMillis();
long cost = (endTimestamp - beginTimestamp);
- long costMs = iterations/cost;
+ long costMs = iterations / cost;
System.out.println(numThreads + " Threads:: IDs per ms: " + costMs);
}
-}
\ No newline at end of file
+}
diff --git a/src/test/java/com/callicoder/snowflake/SnowflakeTest.java b/src/test/java/io/github/arompr/snowflake/SnowflakeTest.java
similarity index 67%
rename from src/test/java/com/callicoder/snowflake/SnowflakeTest.java
rename to src/test/java/io/github/arompr/snowflake/SnowflakeTest.java
index 9478c8d..6721693 100644
--- a/src/test/java/com/callicoder/snowflake/SnowflakeTest.java
+++ b/src/test/java/io/github/arompr/snowflake/SnowflakeTest.java
@@ -1,4 +1,4 @@
-package com.callicoder.snowflake;
+package io.github.arompr.snowflake;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -13,7 +13,7 @@ public class SnowflakeTest {
@Test
public void nextId_shouldGenerateIdWithCorrectBitsFilled() {
- Snowflake snowflake = new Snowflake(784);
+ SnowflakeIdGenerator snowflake = new SnowflakeIdGenerator(784);
long beforeTimestamp = Instant.now().toEpochMilli();
@@ -28,46 +28,49 @@ public void nextId_shouldGenerateIdWithCorrectBitsFilled() {
@Test
public void nextId_shouldGenerateUniqueId() {
- Snowflake snowflake = new Snowflake(234);
+ SnowflakeIdGenerator snowflake = new SnowflakeIdGenerator(234);
int iterations = 5000;
// Validate that the IDs are not same even if they are generated in the same ms
long[] ids = new long[iterations];
- for(int i = 0; i < iterations; i++) {
+ for (int i = 0; i < iterations; i++) {
ids[i] = snowflake.nextId();
}
- for(int i = 0; i < ids.length; i++) {
- for(int j = i+1; j < ids.length; j++) {
+ for (int i = 0; i < ids.length; i++) {
+ for (int j = i + 1; j < ids.length; j++) {
assertFalse(ids[i] == ids[j]);
}
}
}
@Test
- public void nextId_shouldGenerateUniqueIdIfCalledFromMultipleThreads() throws InterruptedException, ExecutionException {
+ public void nextId_shouldGenerateUniqueIdIfCalledFromMultipleThreads()
+ throws InterruptedException, ExecutionException {
int numThreads = 50;
ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
CountDownLatch latch = new CountDownLatch(numThreads);
- Snowflake snowflake = new Snowflake(234);
+ SnowflakeIdGenerator snowflake = new SnowflakeIdGenerator(234);
int iterations = 10000;
- // Validate that the IDs are not same even if they are generated in the same ms in different threads
+ // Validate that the IDs are not the same even if they are generated in the
+ // same ms in different threads
Future[] futures = new Future[iterations];
- for(int i = 0; i < iterations; i++) {
- futures[i] = executorService.submit(() -> {
+ for (int i = 0; i < iterations; i++) {
+ futures[i] = executorService.submit(() -> {
long id = snowflake.nextId();
- latch.countDown();;
+ latch.countDown();
return id;
});
}
latch.await();
- for(int i = 0; i < futures.length; i++) {
- for(int j = i+1; j < futures.length; j++) {
+ for (int i = 0; i < futures.length; i++) {
+ for (int j = i + 1; j < futures.length; j++) {
assertFalse(futures[i].get() == futures[j].get());
}
}
}
-}
\ No newline at end of file
+}