[{"body":"","link":"https://gitorko.github.io/","section":"","tags":null,"title":""},{"body":"","link":"https://gitorko.github.io/tags/arduino/","section":"tags","tags":null,"title":"Arduino"},{"body":"","link":"https://gitorko.github.io/categories/arduino/","section":"categories","tags":null,"title":"Arduino"},{"body":"Arduino projects\nBluetooth Car Item required\nArduino Uno L293D Motor Driver Motor \u0026amp; Wheels Bluetooth Module HS-06 Lithium Battery Battery Holder Install Arduino IDE \u0026amp; install the \u0026quot;Adafruit Motor Shield v1\u0026quot; library\nInstall \u0026quot;Serial Bluetooth Terminal\u0026quot; android/ios app, connect to bluetooth and send text 'F' \u0026amp; 'S' Install Arduino Bluetooth Controller on the phone and control the car\nhttps://play.google.com/store/apps/details?id=com.giristudio.hc05.bluetooth.arduino.control\n1#include \u0026lt;AFMotor.h\u0026gt; 2#include \u0026lt;SoftwareSerial.h\u0026gt; 3 4SoftwareSerial bluetoothSerial(9, 10); // RX, TX 5 6AF_DCMotor motor1(1, MOTOR12_1KHZ); 7AF_DCMotor motor2(2, MOTOR12_1KHZ); 8AF_DCMotor motor3(3, MOTOR34_1KHZ); 9AF_DCMotor motor4(4, MOTOR34_1KHZ); 10 11char command; 12int speed = 180; 13int speed_reducing_factor = 3; 14 15void setup() { 16 Serial.begin(9600); 17 bluetoothSerial.begin(9600); 18 19 Serial.println(\u0026#34;Waiting for Bluetooth data...\u0026#34;); 20} 21 22void loop() { 23 if (bluetoothSerial.available()) { 24 command = bluetoothSerial.read(); 25 Serial.print(\u0026#34;Received: \u0026#34;); 26 Serial.println(command); 27 28 if (command == \u0026#39;F\u0026#39;) Forward(); 29 else if (command == \u0026#39;B\u0026#39;) Backward(); 30 else if (command == \u0026#39;R\u0026#39;) TurnRight(); 31 else if (command == \u0026#39;L\u0026#39;) TurnLeft(); 32 else if (command == \u0026#39;S\u0026#39;) Stop(); 33 } 34} 35 36void Forward() { 37 motor1.setSpeed(speed); 38 motor2.setSpeed(speed); 39 motor3.setSpeed(speed); 40 motor4.setSpeed(speed); 41 motor1.run(FORWARD); 42 
motor2.run(FORWARD); 43 motor3.run(FORWARD); 44 motor4.run(FORWARD); 45} 46 47void Backward() { 48 motor1.setSpeed(speed); 49 motor2.setSpeed(speed); 50 motor3.setSpeed(speed); 51 motor4.setSpeed(speed); 52 motor1.run(BACKWARD); 53 motor2.run(BACKWARD); 54 motor3.run(BACKWARD); 55 motor4.run(BACKWARD); 56} 57 58void TurnRight() { 59 motor1.setSpeed(speed); 60 motor2.setSpeed(speed); 61 motor3.setSpeed(speed); 62 motor4.setSpeed(speed); 63 motor1.run(FORWARD); 64 motor2.run(FORWARD); 65 motor3.run(BACKWARD); 66 motor4.run(BACKWARD); 67} 68 69void TurnLeft() { 70 motor1.setSpeed(speed); 71 motor2.setSpeed(speed); 72 motor3.setSpeed(speed); 73 motor4.setSpeed(speed); 74 motor1.run(BACKWARD); 75 motor2.run(BACKWARD); 76 motor3.run(FORWARD); 77 motor4.run(FORWARD); 78} 79 80void Stop() { 81 motor1.run(RELEASE); 82 motor2.run(RELEASE); 83 motor3.run(RELEASE); 84 motor4.run(RELEASE); 85} Reference https://www.youtube.com/watch?v=Pqs-3GgWW3s\n","link":"https://gitorko.github.io/post/arduino/","section":"post","tags":["arduino"],"title":"Arduino 
Projects"},{"body":"","link":"https://gitorko.github.io/categories/","section":"categories","tags":null,"title":"Categories"},{"body":"","link":"https://gitorko.github.io/tags/index/","section":"tags","tags":null,"title":"Index"},{"body":"","link":"https://gitorko.github.io/post/","section":"post","tags":["index"],"title":"Posts"},{"body":"","link":"https://gitorko.github.io/tags/","section":"tags","tags":null,"title":"Tags"},{"body":"","link":"https://gitorko.github.io/tags/agent/","section":"tags","tags":null,"title":"Agent"},{"body":"","link":"https://gitorko.github.io/categories/agent/","section":"categories","tags":null,"title":"Agent"},{"body":"","link":"https://gitorko.github.io/tags/ai/","section":"tags","tags":null,"title":"Ai"},{"body":"","link":"https://gitorko.github.io/categories/ai/","section":"categories","tags":null,"title":"AI"},{"body":"","link":"https://gitorko.github.io/categories/java/","section":"categories","tags":null,"title":"Java"},{"body":"","link":"https://gitorko.github.io/tags/llm/","section":"tags","tags":null,"title":"Llm"},{"body":"","link":"https://gitorko.github.io/categories/llm/","section":"categories","tags":null,"title":"LLM"},{"body":"","link":"https://gitorko.github.io/tags/ollama/","section":"tags","tags":null,"title":"Ollama"},{"body":"","link":"https://gitorko.github.io/categories/ollama/","section":"categories","tags":null,"title":"Ollama"},{"body":"","link":"https://gitorko.github.io/tags/postgres/","section":"tags","tags":null,"title":"Postgres"},{"body":"","link":"https://gitorko.github.io/categories/spring/","section":"categories","tags":null,"title":"Spring"},{"body":"Spring AI - Ollama (Chat Model)\nGithub: https://github.com/gitorko/project09\nSpring AI Ollama is a platform designed to allow developers to run large language models (LLMs) locally.\nIn this example we will run the llama3.1 LLM model which will run locally and write an AI agent that can interact with the postgres database to create a TODO task 
application.\nCode 1package com.demo.project09.controller; 2 3import com.demo.project09.agent.ChatAgent; 4import com.demo.project09.agent.TodoAgent; 5import org.springframework.beans.factory.annotation.Autowired; 6import org.springframework.web.bind.annotation.*; 7 8import java.time.Instant; 9 10@RestController 11@RequestMapping(\u0026#34;/\u0026#34;) 12public class HomeController { 13 14 @Autowired 15 TodoAgent todoAgent; 16 17 @Autowired 18 ChatAgent chatAgent; 19 20 @GetMapping(\u0026#34;/time\u0026#34;) 21 public String getDate() { 22 return Instant.now().toString(); 23 } 24 25 @PostMapping(\u0026#34;/talk\u0026#34;) 26 public String talk(@RequestBody String message) { 27 return chatAgent.talk(message); 28 } 29 30 @PostMapping(\u0026#34;/agent\u0026#34;) 31 public String agent(@RequestBody String message) { 32 String chatId = \u0026#34;10\u0026#34;; 33 return todoAgent.talk(chatId, message); 34 } 35} 1package com.demo.project09.config; 2 3import org.springframework.ai.chat.client.ChatClient; 4import org.springframework.ai.chat.client.advisor.MessageChatMemoryAdvisor; 5import org.springframework.ai.chat.client.advisor.QuestionAnswerAdvisor; 6import org.springframework.ai.chat.memory.ChatMemory; 7import org.springframework.ai.chat.memory.InMemoryChatMemory; 8import org.springframework.ai.chat.model.ChatModel; 9import org.springframework.ai.embedding.EmbeddingModel; 10import org.springframework.ai.vectorstore.SimpleVectorStore; 11import org.springframework.ai.vectorstore.VectorStore; 12import org.springframework.context.annotation.Bean; 13import org.springframework.context.annotation.Configuration; 14 15@Configuration 16public class ChatConfig { 17 18 @Bean 19 ChatClient chatClient(ChatModel chatModel) { 20 return ChatClient.create(chatModel); 21 } 22 23 @Bean 24 ChatClient agentClient(ChatModel chatModel, VectorStore vectorStore, ChatMemory chatMemory) { 25 ChatClient.Builder builder = ChatClient.builder(chatModel); 26 return 
builder.defaultSystem(\u0026#34;\u0026#34;\u0026#34; 27 You are a todo task application bot named \u0026#34;alexa\u0026#34; for the application \u0026#34;FunApp\u0026#34; 28 Respond in a friendly, helpful, and joyful manner. 29 You are interacting with customers through an online chat system where they can add, remove and get todo tasks 30 Before adding a todo task you MUST get the task description string from the user. 31 Before getting the specific todo task you need to ask the user to provide the task number. 32 Before getting tasks that the user wants you need to get the single keyword in the task from the user. 33 To delete a todo task take the task number from the user 34 To search a todo task take the keyword string from the user 35 Use the provided functions to fetch todo tasks, add todo task, remove todo tasks and search todo tasks. 36 Today is {current_date}. 37 \u0026#34;\u0026#34;\u0026#34;) 38 .defaultAdvisors( 39 new MessageChatMemoryAdvisor(chatMemory), // chat-memory advisor 40 new QuestionAnswerAdvisor(vectorStore) // RAG advisor 41 ) 42 .defaultFunctions(\u0026#34;getTodo\u0026#34;, \u0026#34;addTodo\u0026#34;, \u0026#34;deleteTodo\u0026#34;, \u0026#34;searchTodo\u0026#34;) 43 .build(); 44 } 45 46 @Bean 47 public VectorStore vectorStore(EmbeddingModel embeddingModel) { 48 return SimpleVectorStore.builder(embeddingModel) 49 .build(); 50 } 51 52 @Bean 53 public ChatMemory chatMemory() { 54 return new InMemoryChatMemory(); 55 } 56 57} 1package com.demo.project09.agent; 2 3import lombok.RequiredArgsConstructor; 4import org.springframework.ai.chat.client.ChatClient; 5import org.springframework.stereotype.Service; 6 7@Service 8@RequiredArgsConstructor 9public class ChatAgent { 10 11 private final ChatClient chatClient; 12 13 public String talk(String message) { 14 return chatClient.prompt().user(message).call().content(); 15 } 16} 1package com.demo.project09.agent; 2 3import com.demo.project09.domain.TodoTask; 4import 
com.demo.project09.service.TodoService; 5import com.fasterxml.jackson.annotation.JsonInclude; 6import lombok.RequiredArgsConstructor; 7import lombok.extern.slf4j.Slf4j; 8import org.springframework.ai.chat.client.ChatClient; 9import org.springframework.context.annotation.Bean; 10import org.springframework.context.annotation.Description; 11import org.springframework.stereotype.Service; 12 13import java.time.LocalDate; 14import java.util.function.Consumer; 15import java.util.function.Function; 16 17@Service 18@Slf4j 19@RequiredArgsConstructor 20public class TodoAgent { 21 22 public static final String CHAT_MEMORY_CONVERSATION_ID = \u0026#34;chat_memory_conversation_id\u0026#34;; 23 public static final String CHAT_MEMORY_RESPONSE_SIZE = \u0026#34;chat_memory_response_size\u0026#34;; 24 private final ChatClient agentClient; 25 private final TodoService todoService; 26 27 public String talk(String chatId, String message) { 28 return agentClient.prompt() 29 .system(s -\u0026gt; s.param(\u0026#34;current_date\u0026#34;, LocalDate.now().toString())) 30 .advisors(advisor -\u0026gt; advisor.param(CHAT_MEMORY_CONVERSATION_ID, chatId) 31 .param(CHAT_MEMORY_RESPONSE_SIZE, 100)) 32 .user(message) 33 .call() 34 .content(); 35 } 36 37 @Bean 38 @Description(\u0026#34;Get Todo\u0026#34;) 39 public Function\u0026lt;Void, Iterable\u0026lt;TodoTask\u0026gt;\u0026gt; getTodo() { 40 return request -\u0026gt; { 41 return todoService.getTodo(); 42 }; 43 44 } 45 46 @Bean 47 @Description(\u0026#34;Add Todo\u0026#34;) 48 public Function\u0026lt;TodoTask, TodoTask\u0026gt; addTodo() { 49 return request -\u0026gt; { 50 return todoService.addTodo(request.getTaskDescription()); 51 }; 52 } 53 54 @Bean 55 @Description(\u0026#34;Delete Todo\u0026#34;) 56 public Consumer\u0026lt;TodoTask\u0026gt; deleteTodo() { 57 return request -\u0026gt; { 58 todoService.deleteTodo(request.getId()); 59 }; 60 } 61 62 @Bean 63 @Description(\u0026#34;Search Todo\u0026#34;) 64 public Function\u0026lt;SearchKey, 
TodoTask\u0026gt; searchTodo() { 65 return request -\u0026gt; { 66 return todoService.searchTodo(request.keyword); 67 }; 68 } 69 70 @JsonInclude(JsonInclude.Include.NON_NULL) 71 public record SearchKey(String keyword) { 72 } 73} 74 Postman Import the postman collection to postman\nPostman Collection\nSetup 1# project09 2 3Spring AI with Ollama 4 5### Version 6 7Check version 8 9```bash 10$java --version 11openjdk 21 12``` 13 14### Ollama 15 16Download and install ollama 17[https://ollama.com/](https://ollama.com/) 18 19Run the model 20 21```bash 22ollama run llama3.1 23ollama pull mxbai-embed-large 24``` 25 26### Postgres DB 27 28```bash 29docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:14 30docker ps 31docker exec -it pg-container psql -U postgres -W postgres 32CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 33CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 34grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 35 36docker stop pg-container 37docker start pg-container 38``` 39 40### Dev 41 42To run the backend in dev mode. 
43 44```bash 45./gradlew clean build 46./gradlew bootRun 47``` References https://spring.io/projects/spring-ai/\nhttps://docs.spring.io/spring-ai/reference/api/chat/ollama-chat.html\nhttps://hub.docker.com/r/ollama/ollama\nhttps://ollama.com/\n","link":"https://gitorko.github.io/post/spring-ai-ollama/","section":"post","tags":["ollama","ai","llm","agent","postgres"],"title":"Spring AI - Ollama with AI Agent"},{"body":"","link":"https://gitorko.github.io/tags/watermarking/","section":"tags","tags":null,"title":"Watermarking"},{"body":"Tesla used space characters in internal emails to identify leaks\nGithub: https://github.com/gitorko/project06\nWatermarking The below code embeds a binary message with spaces in a given text, this can be used to trace the origin of the text message and which user it belonged to.\nCode 1package com.demo.project06; 2 3import java.util.ArrayList; 4import java.util.List; 5 6public class Main { 7 8 public static String[] userNames = {\u0026#34;jack@company.com\u0026#34;, \u0026#34;adam@company.com\u0026#34;, \u0026#34;sally@company.com\u0026#34;}; 9 10 public static String[][] users; 11 12 public static void main(String[] args) { 13 String message = \u0026#34;Hello, How are you today? This is a test message for watermarking. 
Have a great day!\u0026#34;; 14 String[] sentences = message.split(\u0026#34;(?\u0026lt;=[.\\\\?])\u0026#34;); 15 if (sentences.length \u0026lt; 3) { 16 throw new RuntimeException(\u0026#34;Sentences are too few to encode watermark\u0026#34;); 17 } 18 generateUserBinaryIds(); 19 // Encoding and Decoding process 20 String user = \u0026#34;adam@company.com\u0026#34;; 21 String encodedMessage = encodeMessage(message, user); 22 System.out.println(\u0026#34;Watermarked message for \u0026#34; + user + \u0026#34;: \\n\u0026#34; + encodedMessage); 23 24 String decodedUser = decodeMessage(encodedMessage); 25 System.out.println(\u0026#34;Message belongs to: \u0026#34; + decodedUser); 26 } 27 28 private static void generateUserBinaryIds() { 29 30 users = new String[userNames.length + 1][2]; 31 users[0][0] = \u0026#34;NA\u0026#34;; 32 users[0][1] = \u0026#34;000\u0026#34;; 33 34 for (int i = 1; i \u0026lt;= userNames.length; i++) { 35 // Convert the index into a binary string with padding (e.g., \u0026#34;001\u0026#34;, \u0026#34;010\u0026#34;, \u0026#34;011\u0026#34;, ...) 36 String binaryId = String.format(\u0026#34;%3s\u0026#34;, Integer.toBinaryString(i)).replace(\u0026#39; \u0026#39;, \u0026#39;0\u0026#39;); 37 users[i][0] = userNames[i - 1]; 38 users[i][1] = binaryId; 39 } 40 } 41 42 public static String encodeMessage(String message, String user) { 43 // Loop through the users to find the binary ID of the user 44 for (String[] u : users) { 45 if (user.equals(u[0])) { 46 String binaryId = u[1]; 47 return watermarkMessage(message, binaryId); 48 } 49 } 50 throw new RuntimeException(\u0026#34;Unknown user: \u0026#34; + user); 51 } 52 53 public static String decodeMessage(String message) { 54 // Split the message into sentences, keeping the period (. or ?) 
55 String[] sentences = message.split(\u0026#34;(?\u0026lt;=[.\\\\?])\u0026#34;); 56 StringBuilder binaryPattern = new StringBuilder(); 57 for (String sentence : sentences) { 58 int spaceCount = countLeadingSpaces(sentence); 59 binaryPattern.append(spaceCount == 0 ? \u0026#34;0\u0026#34; : \u0026#34;1\u0026#34;); 60 } 61 System.out.println(\u0026#34;Binary Code: \u0026#34; + binaryPattern); 62 for (String[] user : users) { 63 String name = user[0]; 64 String binaryId = user[1]; 65 if (binaryPattern.toString().equals(binaryId)) { 66 return name; 67 } 68 } 69 return \u0026#34;Unknown User\u0026#34;; 70 } 71 72 private static String watermarkMessage(String message, String binaryId) { 73 // Split the message into sentences, keeping the period (. or ?) 74 String[] sentences = message.split(\u0026#34;(?\u0026lt;=[.\\\\?])\u0026#34;); 75 76 List\u0026lt;String\u0026gt; watermarkedSentences = new ArrayList\u0026lt;\u0026gt;(); 77 for (int i = 0; i \u0026lt; sentences.length; i++) { 78 char spaceCount = binaryId.charAt(i % binaryId.length()); 79 int spacesToAdd = spaceCount == \u0026#39;1\u0026#39; ? 1 : 0; // If \u0026#39;1\u0026#39;, add 1 space; if \u0026#39;0\u0026#39;, add 0 spaces 80 String sentenceWithSpaces = \u0026#34; \u0026#34;.repeat(spacesToAdd) + sentences[i]; 81 watermarkedSentences.add(sentenceWithSpaces); 82 } 83 return String.join(\u0026#34;\u0026#34;, watermarkedSentences); 84 } 85 86 private static int countLeadingSpaces(String sentence) { 87 int count = 0; 88 int length = sentence.length(); 89 while (count \u0026lt; length \u0026amp;\u0026amp; sentence.charAt(count) == \u0026#39; \u0026#39;) { 90 count++; 91 } 92 return count \u0026gt; 1 ? 
1 : 0; 93 } 94} Setup 1# project06 2 3Encode and Decode watermarked text message 4 5### Version 6 7Check version 8 9```bash 10$java --version 11openjdk 21 12``` References https://en.wikipedia.org/wiki/Text_watermarking\n","link":"https://gitorko.github.io/post/watermarking-text/","section":"post","tags":["watermarking"],"title":"Watermarking Text"},{"body":"Apache Spark is an open-source analytics engine \u0026amp; cluster-compute framework that processes large-scale data.\nSpark supports in-memory caching and optimized query execution for fast analytics. It has built-in modules for machine learning, graph processing, streaming, and SQL\nGithub: https://github.com/gitorko/project08\nApache Spark Spark applications run as independent sets of processes on a cluster. Spark uses RDD (Resilient Distributed Datasets) to store shared data.\nApache Spark Apache Kafka Analyze large datasets that cant fit on single machine (big data) Process large events as they happen and stores them distributed manner Processing data task runs on many nodes Processing data task is delegated to clients/consumers Provides machine learning libraries (MLlib), graph processing and SQL querying No machine learning libraries/ Graph processing / SQL querying provided Batch and stream processing, ETL jobs, and complex analytics Real-time data streaming, building data pipelines, and handling event-driven architectures Not suited for event or message handling (producer-consumer) Integrating disparate systems for message passing and event storage (producer-consumer) Spark can be complex to set up and tune, especially in a distributed environment Kafka is relatively easier to set up for streaming and messaging Code 1package com.demo.project08.service; 2 3import java.util.ArrayList; 4import java.util.Arrays; 5import java.util.List; 6 7import lombok.extern.slf4j.Slf4j; 8import org.apache.spark.api.java.JavaPairRDD; 9import org.apache.spark.api.java.JavaRDD; 10import 
org.apache.spark.api.java.JavaSparkContext; 11import org.apache.spark.sql.Dataset; 12import org.apache.spark.sql.Row; 13import org.apache.spark.sql.SparkSession; 14import scala.Tuple2; 15 16@Slf4j 17public class SparkService { 18 19 private static SparkSession sparkSession; 20 private static JavaSparkContext sparkContext; 21 22 private String sparkMaster; 23 24 public SparkService(String sparkMaster) { 25 this.sparkMaster = sparkMaster; 26 } 27 28 // Singleton for SparkSession 29 private SparkSession getSparkSession() { 30 if (sparkSession == null) { 31 synchronized (SparkService.class) { 32 if (sparkSession == null) { 33 sparkSession = SparkSession.builder() 34 .appName(\u0026#34;Spark App\u0026#34;) 35 .master(sparkMaster) 36 .getOrCreate(); 37 } 38 } 39 } 40 return sparkSession; 41 } 42 43 // Singleton for SparkContext 44 private JavaSparkContext getSparkContext() { 45 if (sparkContext == null) { 46 synchronized (SparkService.class) { 47 if (sparkContext == null) { 48 var sparkSession = getSparkSession(); 49 sparkContext = new JavaSparkContext(sparkSession.sparkContext()); 50 } 51 } 52 } 53 return sparkContext; 54 } 55 56 public List\u0026lt;String\u0026gt; getWordCount() { 57 JavaRDD\u0026lt;String\u0026gt; inputFile = getSparkContext().textFile(\u0026#34;/tmp/data/word-file.txt\u0026#34;); 58 JavaRDD\u0026lt;String\u0026gt; wordsFromFile 59 = inputFile.flatMap(s -\u0026gt; Arrays.asList(s.split(\u0026#34; \u0026#34;)).iterator()); 60 JavaPairRDD countData = wordsFromFile.mapToPair(t -\u0026gt; new Tuple2(t, 1)).reduceByKey((x, y) -\u0026gt; (int) x + (int) y); 61 List\u0026lt;Tuple2\u0026lt;String, Integer\u0026gt;\u0026gt; output = countData.collect(); 62 List\u0026lt;String\u0026gt; result = new ArrayList\u0026lt;\u0026gt;(); 63 for (Tuple2\u0026lt;?, ?\u0026gt; tuple : output) { 64 log.info(tuple._1() + \u0026#34;: \u0026#34; + tuple._2()); 65 result.add(tuple._1() + \u0026#34;: \u0026#34; + tuple._2()); 66 } 67 return result; 68 } 69 70 public 
List\u0026lt;String\u0026gt; processJson() { 71 // Create a sample dataset 72 Dataset\u0026lt;Row\u0026gt; data = getSparkSession().read().json(\u0026#34;/tmp/data/customers.json\u0026#34;); 73 74 // Perform a transformation 75 Dataset\u0026lt;Row\u0026gt; result = data.select(\u0026#34;name\u0026#34;).where(\u0026#34;age \u0026gt; 30\u0026#34;); 76 77 // Collect the result into a list 78 var names = result.toJavaRDD() 79 .map(row -\u0026gt; row.getString(0)) 80 .collect(); 81 82 names.forEach(n -\u0026gt; { 83 log.info(\u0026#34;Name: {}\u0026#34;, n); 84 }); 85 return names; 86 } 87 88 public void shutdownSpark() { 89 if (sparkContext != null) { 90 sparkContext.stop(); 91 sparkContext = null; 92 } 93 if (sparkSession != null) { 94 sparkSession.stop(); 95 sparkSession = null; 96 } 97 } 98 99} Setup 1# project08 2 3Apache Spark 4 5### Version 6 7Check version 8 9```bash 10$java --version 11openjdk 17 12``` 13 14### Dev 15 16To build the code. 17 18```bash 19./gradlew clean build 20./gradlew shadowJar 21cp build/libs/project08-fat-1.0.0-all.jar /tmp/data/ 22cp src/main/resources/word-file.txt /tmp/data 23cp src/main/resources/customers.json /tmp/data 24``` 25 26```bash 27cd docker 28docker-compose up -d 29docker-compose down 30ipconfig getifaddr en0 31``` 32 33Scala version check on spark cluster 34 35```bash 36spark-shell 37scala.util.Properties.versionString 38res0: String = version 2.12.18 39``` 40 41```bash 42cd /opt/bitnami/spark 43cd bin 44spark-submit --class com.demo.project08.Main --master spark://10.177.182.61:7077 /tmp/data/project08-fat-1.0.0-all.jar 45``` 46 47 48http://localhost:8090 49http://localhost:8091 References https://spark.apache.org/\n","link":"https://gitorko.github.io/post/apache-spark/","section":"post","tags":["spark"],"title":"Apache 
Spark"},{"body":"","link":"https://gitorko.github.io/categories/apachespark/","section":"categories","tags":null,"title":"ApacheSpark"},{"body":"","link":"https://gitorko.github.io/tags/spark/","section":"tags","tags":null,"title":"Spark"},{"body":"","link":"https://gitorko.github.io/tags/kafka/","section":"tags","tags":null,"title":"Kafka"},{"body":"","link":"https://gitorko.github.io/categories/kafka/","section":"categories","tags":null,"title":"Kafka"},{"body":"","link":"https://gitorko.github.io/tags/kafka-stream/","section":"tags","tags":null,"title":"Kafka-Stream"},{"body":"","link":"https://gitorko.github.io/categories/messaging/","section":"categories","tags":null,"title":"Messaging"},{"body":"","link":"https://gitorko.github.io/tags/spring/","section":"tags","tags":null,"title":"Spring"},{"body":"Spring Boot 3 integration with Apache Kafka \u0026amp; Kafka streams\nGithub: https://github.com/gitorko/project80\nKafka Kafka is a distributed \u0026amp; fault-tolerant, high throughput, scalable stream processing \u0026amp; messaging system.\nKafka as publisher-subscriber messaging system. Kafka as queue (point-point) messaging system. Kafka as stream processing system that reacts to event in realtime. Kafka as a store for data. Terms\nBroker: Kafka server. Cluster: A group of kafka brokers. Topic: Logical grouping of messages. Partition: A topic can contain many partitions. Messages are stored in a partition. Offset: Used to keep track of message. Consumer Group: Reads the messages from a topic. Consumer: A consumer group can have N consumers, each will read a partition. Consumers cant be more than number of partitions. Zookeeper: Used to track the offset, consumers, topics etc. Order is guaranteed only withing a partition and not across partitions. Within a consumer group a partition can be read only by one consumer. Leader replicates partition to other replica servers based on replication count. If leader fails then follower will become leader. 
Zookeeper manages all brokers, keeps track of offset, consumer group, topic, partitions etc. Once a message acknowledgement fails kafka will retry and even after certain retries if it fails, the message will be moved to dead letter. Kafka provides high throughput because of the following\nKafka scales because it works on append only mode, sequential disk write is faster than random access file write Kafka copies data from disk to network by ready with zero copy. OS buffer directly copies to NIC buffer. There is no set limit to the number of topics that can exist in a Kafka cluster, each partition has a limit of 4000 partitions per broker, maximum 200,000 partitions per Kafka cluster\nKafka Use-Cases\nActivity tracking for high traffic website Processing streaming big data Monitoring financial data in real time IoT sensor data processing Kafka stores streams of records (messages) in topics. Topics are partitioned and replicated across multiple nodes thus kafka can scale and be a distributed system. Producers publish data to the topics. Consumer groups can subscribe to topics.\nAdvantages\nData Store - Kafka is append only commit log. Which means it can also act as a data store. Queue (point-point) - If only one consumer group subscribes to a topic it behaves like a Queue (point-point) messaging system. Pub-Sub - If more than one consumer group subscribe to a topic it behaves like Pub-Sub messaging system. Consumer Group - Number of consumers in a group must be less than or equal to number of partitions. Cant have more consumers in a group than there are partitions. Partition - Producer can only write to topic but which partition the data gets written to is not in its control. Partition - When you add a new kafka broker the partition is replicated so loss of one node doesn't crash the system. Ordering - Ordering of messages is guaranteed only in a partition and not across partitions. Offset - Consumer can choose to read records from latest or from beginning. 
Long polling - Uses poll model compared to RabbitMQ which uses push model Adapters - Provides adapters that can be used to write data to db and other endpoints Stream - Provides stream processing capabilities Similar to spring rest template or jdbc template which abstracts the rest/jdbc calls spring provides kafka template which provides high level abstraction to interact with kafka. There is an even higher level of abstraction provided by spring cloud stream which lets we integrate with kafka or rabbitmq and other messaging systems. So when the messaging systems changes you dont need to make code changes in producer or consumer.\nCode 1package com.demo.project80; 2 3import java.util.Arrays; 4import java.util.List; 5import java.util.Random; 6import java.util.UUID; 7import java.util.concurrent.TimeUnit; 8 9import com.demo.project80.domain.User; 10import lombok.extern.slf4j.Slf4j; 11import org.springframework.beans.factory.annotation.Value; 12import org.springframework.boot.CommandLineRunner; 13import org.springframework.boot.SpringApplication; 14import org.springframework.boot.autoconfigure.SpringBootApplication; 15import org.springframework.context.annotation.Bean; 16import org.springframework.kafka.core.KafkaTemplate; 17 18@SpringBootApplication 19@Slf4j 20public class KafkaProducer { 21 22 @Value(value = \u0026#34;${topic.name}\u0026#34;) 23 private String topicName; 24 25 public static void main(String[] args) { 26 SpringApplication.run(KafkaProducer.class, args); 27 } 28 29 @Bean 30 public CommandLineRunner onStart(KafkaTemplate\u0026lt;String, User\u0026gt; kafkaTemplate) { 31 return (args) -\u0026gt; { 32 List\u0026lt;String\u0026gt; users = Arrays.asList(\u0026#34;david\u0026#34;, \u0026#34;john\u0026#34;, \u0026#34;raj\u0026#34;, \u0026#34;peter\u0026#34;); 33 Random random = new Random(); 34 for (int i = 0; i \u0026lt; 10; i++) { 35 User user = new User(users.get(random.nextInt(users.size())), random.nextInt(100)); 36 log.info(\u0026#34;Sending User: 
{}\u0026#34;, user); 37 String key = UUID.randomUUID().toString(); 38 kafkaTemplate.send(topicName, key, user); 39 TimeUnit.SECONDS.sleep(10); 40 } 41 }; 42 } 43 44} 1package com.demo.project80; 2 3import com.demo.project80.domain.User; 4import lombok.extern.slf4j.Slf4j; 5import org.springframework.boot.SpringApplication; 6import org.springframework.boot.autoconfigure.SpringBootApplication; 7import org.springframework.kafka.annotation.KafkaListener; 8 9@SpringBootApplication 10@Slf4j 11public class KafkaConsumer { 12 13 public static void main(String[] args) { 14 SpringApplication.run(KafkaConsumer.class, args); 15 } 16 17 @KafkaListener(id = \u0026#34;my-client-app\u0026#34;, topics = \u0026#34;${topic.name}\u0026#34;, groupId = \u0026#34;group-01\u0026#34;) 18 public void topicConsumer(User user) { 19 log.info(\u0026#34;Received User : {}\u0026#34;, user); 20 } 21 22} 1version: \u0026#39;2\u0026#39; 2services: 3 zookeeper: 4 container_name: zookeeper 5 image: \u0026#39;bitnami/zookeeper:latest\u0026#39; 6 ports: 7 - 2181:2181 8 environment: 9 - ALLOW_ANONYMOUS_LOGIN=yes 10 kafkaserver: 11 hostname: kafkaserver 12 container_name: kafkaserver 13 image: \u0026#39;bitnami/kafka:latest\u0026#39; 14 ports: 15 - 9092:9092 16 depends_on: 17 - zookeeper 18 environment: 19 - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 20 - KAFKA_ADVERTISED_HOST_NAME=kafkaserver 21 - ALLOW_PLAINTEXT_LISTENER=yes 22 links: 23 - zookeeper:zookeeper 24 kafka-ui: 25 container_name: kafka-ui 26 image: provectuslabs/kafka-ui:latest 27 ports: 28 - 9090:8080 29 depends_on: 30 - zookeeper 31 - kafkaserver 32 environment: 33 KAFKA_CLUSTERS_0_NAME: local 34 KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafkaserver:9092 35 KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper:2181 The group id of your client which uses group management to assign topic partitions to consumers The auto-offset-reset=earliest ensures the new consumer group will get the oldest available message.\nWe can have multiple kafka listener for a topic with 
different group id A consumer can listen to more than one topic. We have created the topic 'mytopic' with only one partition. For a topic with multiple partitions, @KafkaListener can explicitly subscribe to a particular partition of a topic with an initial offset.\n1@KafkaListener(topics = \u0026#34;topic1\u0026#34;, group = \u0026#34;group1\u0026#34;) 2@KafkaListener(topics = \u0026#34;topic1,topic2\u0026#34;, group = \u0026#34;group1\u0026#34;) 3@KafkaListener(topicPartitions = @TopicPartition(topic = \u0026#34;topic1\u0026#34;, 4 partitionOffsets = { 5 @PartitionOffset(partition = \u0026#34;0\u0026#34;, initialOffset = \u0026#34;0\u0026#34;), 6 @PartitionOffset(partition = \u0026#34;2\u0026#34;, initialOffset = \u0026#34;0\u0026#34;) 7})) Kafka Streams Kafka Streams has stream-table duality. Tables are a set of evolving facts. Each new event overwrites the old one, whereas streams are a collection of immutable facts. Kafka Streams provides two abstractions for Streams and Tables. KStream handles the stream of records. KTable manages the changelog stream with the latest state of a given key For not partitioned tables we can use GlobalKTables to broadcast information to all tasks.\nWhen we use other projects like apache spark, storm, flink we write code and copy the jar to the nodes where the actual work happens. With the introduction of kafka stream we can now write our processing logic for streams, then it can run anywhere the jar can run. 
KafkaStreams enables us to consume from Kafka topics, analyze or transform data, and potentially, send it to another Kafka topic.\nWe will now count the users by age group.\nCode 1package com.demo.project80; 2 3import java.util.concurrent.TimeUnit; 4 5import com.demo.project80.domain.User; 6import lombok.extern.slf4j.Slf4j; 7import org.apache.kafka.common.serialization.Serde; 8import org.apache.kafka.common.serialization.Serdes; 9import org.apache.kafka.streams.KafkaStreams; 10import org.apache.kafka.streams.KeyValue; 11import org.apache.kafka.streams.StreamsBuilder; 12import org.apache.kafka.streams.Topology; 13import org.apache.kafka.streams.kstream.Consumed; 14import org.apache.kafka.streams.kstream.Grouped; 15import org.apache.kafka.streams.kstream.KStream; 16import org.apache.kafka.streams.kstream.KTable; 17import org.springframework.beans.factory.annotation.Value; 18import org.springframework.boot.CommandLineRunner; 19import org.springframework.boot.SpringApplication; 20import org.springframework.boot.autoconfigure.SpringBootApplication; 21import org.springframework.context.annotation.Bean; 22import org.springframework.kafka.config.KafkaStreamsConfiguration; 23import org.springframework.kafka.support.serializer.JsonSerde; 24 25@SpringBootApplication 26@Slf4j 27public class KafkaStream { 28 29 private static final Serde\u0026lt;String\u0026gt; STRING_SERDE = Serdes.String(); 30 31 @Value(value = \u0026#34;${topic.name}\u0026#34;) 32 private String topicName; 33 34 public static void main(String[] args) { 35 SpringApplication.run(KafkaStream.class, args); 36 } 37 38 @Bean 39 public CommandLineRunner streamData(KafkaStreamsConfiguration kStreamsConfig) { 40 return (args) -\u0026gt; { 41 StreamsBuilder streamsBuilder = new StreamsBuilder(); 42 KStream\u0026lt;String, User\u0026gt; streamOfUsers = streamsBuilder 43 .stream(topicName, Consumed.with(STRING_SERDE, new JsonSerde\u0026lt;\u0026gt;(User.class))); 44 45 streamOfUsers.foreach((k, v) -\u0026gt; { 46 
log.info(\u0026#34;user: {}, age: {}\u0026#34;, v.getName(), v.getAge()); 47 }); 48 49 KTable\u0026lt;String, Long\u0026gt; employeeCountByCompany = streamOfUsers 50 .map((k, v) -\u0026gt; new KeyValue\u0026lt;\u0026gt;(v.getAge(), String.valueOf(v.getAge()))) 51 .groupBy((k, w) -\u0026gt; w, Grouped.with(STRING_SERDE, STRING_SERDE)) 52 .count(); 53 employeeCountByCompany.toStream().foreach((w, c) -\u0026gt; log.info(\u0026#34;Age: {} , Count: {}\u0026#34;, w, c)); 54 55 Topology topology = streamsBuilder.build(); 56 KafkaStreams streams = new KafkaStreams(topology, kStreamsConfig.asProperties()); 57 streams.cleanUp(); 58 streams.start(); 59 TimeUnit.SECONDS.sleep(10); 60 Runtime.getRuntime().addShutdownHook(new Thread(streams::close)); 61 }; 62 } 63 64} Run the main method of KafkaStream.\nSetup 1# Project 80 2 3Spring Boot \u0026amp; Kafka 4 5[https://gitorko.github.io/spring-apache-kafka/](https://gitorko.github.io/spring-apache-kafka/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14``` 15 16### Kafka 17 18To run kafka we need zookeeper, use the docker compose command to run kafka as a container 19 20For windows ensure the C:\\Windows\\System32\\drivers\\etc\\hosts file has these 2 entries. 21For Linux ensure /etc/hosts has these 2 entries. 
22 23```bash 24127.0.0.1 zookeeper 25127.0.0.1 kafkaserver 26``` 27 28```bash 29docker-compose -f docker/docker-compose.yml up 30``` 31 32To create topic 33 34```bash 35docker exec -it kafkaserver /bin/bash 36/opt/bitnami/kafka/bin/kafka-topics.sh --create --replication-factor 1 --partitions 1 --topic mytopic.000 --bootstrap-server localhost:9092 37``` 38 39Describe topic 40 41```bash 42docker exec -it kafkaserver /bin/bash 43/opt/bitnami/kafka/bin/kafka-topics.sh --list --bootstrap-server localhost:9092 44/opt/bitnami/kafka/bin/kafka-topics.sh --describe mytopic.000 --bootstrap-server localhost:9092 45``` 46 47To delete topic 48 49```bash 50docker exec -it kafkaserver /bin/bash 51/opt/bitnami/kafka/bin/kafka-topics.sh --delete --topic mytopic.000 --bootstrap-server localhost:9092 52``` 53 54Clean up 55 56```bash 57docker-compose -f docker/docker-compose.yml stop 58docker rm kafka-ui kafkaserver zookeeper 59``` 60 61Restart 62 63```bash 64docker-compose -f docker/docker-compose.yml start 65``` 66 67Dashboard for kafka, wait for a few seconds as it takes time to come up. 68 69Open [http://localhost:9090/](http://localhost:9090/) 70 71### Dev 72 73To run the code. 
74 75```bash 76./gradlew clean build 77 78./gradlew :kserver:build 79./gradlew :kclient:build 80./gradlew :kcommon:build 81./gradlew :kstream:build 82 83./gradlew :kserver:bootRun 84./gradlew :kclient:bootRun 85./gradlew :kstream:bootRun 86``` References https://www.baeldung.com/java-kafka-streams\nhttps://kafka.apache.org/quickstart\nhttps://baeldung-cn.com/java-kafka-streams-vs-kafka-consumer\nhttps://tanzu.vmware.com/developer/guides/kafka-gs/\nhttps://kafka.apache.org/\n","link":"https://gitorko.github.io/post/spring-apache-kafka/","section":"post","tags":["spring","spring-boot-3","kafka","kafka-stream"],"title":"Spring Boot - Apache Kafka"},{"body":"","link":"https://gitorko.github.io/tags/spring-boot-3/","section":"tags","tags":null,"title":"Spring-Boot-3"},{"body":"","link":"https://gitorko.github.io/tags/events/","section":"tags","tags":null,"title":"Events"},{"body":"","link":"https://gitorko.github.io/categories/events/","section":"categories","tags":null,"title":"Events"},{"body":"","link":"https://gitorko.github.io/tags/liquibase/","section":"tags","tags":null,"title":"Liquibase"},{"body":"","link":"https://gitorko.github.io/categories/modulith/","section":"categories","tags":null,"title":"Modulith"},{"body":"Spring boot modulith implementation with spring events \u0026amp; persistence with postgres \u0026amp; replay of events.\nGithub: https://github.com/gitorko/project73\nSpring Modulith Modular Monolith is an architectural style where source code is structured on the concept of modules\nSpring Modulith is a module of Spring that helps in organizing large applications into well-structured, manageable, and self-contained modules. It provides various features like module isolation, events, and monitoring to support a modular architecture.\nBuilding a Modern Monolith application, with Spring Modulith lets you avoid the network jumps, serialization \u0026amp; de-serialization. Each service is isolated via package boundary. 
Eg: OrderService, NotificationService bean won't be injected in all the classes, instead they rely on spring event bus to communicate with each other.\nYou can structure your code based on domain, Order package deals only with processing the order, notification package deals only with sending notifications etc. We can split the core of the monolith into modules by identifying the domains of our application and defining bounded contexts. We can consider the domain or business modules of our application as direct sub-packages of the application’s main package.\nSince the application becomes a monolith, you can't individually scale out individual services, so if a particular service needs more scale you can move only that module to a separate service (microservice architecture).\nSpring events ensure loose coupling in an application and allow inter-module interaction. Instead of injecting different beans and invoking them directly you now publish an event and all other places that need to process it will implement a listener.\nTightly coupled with single commit transaction boundary\n1@Transactional 2public void complete(Order order) { 3 orderService.save(order); 4 inventoryService.update(order); 5 auditService.add(order); 6 rewardService.update(order); 7 notificationService.update(order); 8} Loosely coupled but still single commit transaction boundary\n1@Transactional 2public void complete(Order order) { 3 applicationEventPublisher.publishEvent(order); 4} Service can be developed without all the implementations. Eg: Audit logging service is being developed and not ready, hence instead of being blocked on developing the core customer service class, just publish an event and when the service is ready add a listener to process that event.\nSpring events are in-memory so if the server restarts all events published will be lost. 
With Spring Modulith library you can now persist such events and process them after a restart.\nA module can access the content of any other module but can't access sub-packages of other modules. A module also can't access content that is not public. By default @EventListener runs on the same thread as the caller, to run it asynchronously use @Async\n@ApplicationModuleListener by default comes with @Transactional, @Async \u0026amp; @TransactionalEventListener annotation enabled.\n@Externalized will publish the events to queues like RabbitMQ/Kafka.\nTo process the events on restart enable this flag.\n1spring: 2 modulith: 3 republish-outstanding-events-on-restart: true Code 1package com.demo.project73.common; 2 3import lombok.extern.slf4j.Slf4j; 4import org.springframework.boot.context.event.ApplicationReadyEvent; 5import org.springframework.context.event.EventListener; 6import org.springframework.stereotype.Service; 7 8@Service 9@Slf4j 10public class ApplicationEventListener { 11 /** 12 * ApplicationStartingEvent - fired at the start of a run but before any processing 13 * ApplicationEnvironmentPreparedEvent - fired when the Environment to be used in the context is available 14 * ApplicationContextInitializedEvent- fired when the ApplicationContext is ready 15 * ApplicationPreparedEvent - fired when ApplicationContext is prepared but not refreshed 16 * ContextRefreshedEvent - fired when an ApplicationContext is refreshed 17 * WebServerInitializedEvent - fired after the web server is ready 18 * ApplicationStartedEvent - fired after the context has been refreshed but before any application and command-line runners have been called 19 * ApplicationReadyEvent - fired to indicate that the application is ready to service 20 * ApplicationFailedEvent - fired if there is an exception and the application fails to start 21 */ 22 @EventListener(ApplicationReadyEvent.class) 23 public void onStart() { 24 log.info(\u0026#34;Triggered when application ready!\u0026#34;); 25 } 26} 
1package com.demo.project73.audit.internal.listener; 2 3import com.demo.project73.common.OrderEvent; 4import lombok.extern.slf4j.Slf4j; 5import org.springframework.modulith.events.ApplicationModuleListener; 6import org.springframework.stereotype.Component; 7 8@Component 9@Slf4j 10public class AuditEventListener { 11 12 @ApplicationModuleListener 13 public void processOrderEvent(OrderEvent orderEvent) { 14 log.info(\u0026#34;[Audit] Order Event Received: {}\u0026#34;, orderEvent); 15 } 16 17} 1package com.demo.project73.order.internal.service; 2 3import java.util.UUID; 4 5import com.demo.project73.common.CustomEvent; 6import com.demo.project73.common.PrimeReward; 7import com.demo.project73.common.SeasonReward; 8import com.demo.project73.order.internal.domain.Order; 9import com.demo.project73.common.OrderEvent; 10import lombok.RequiredArgsConstructor; 11import lombok.extern.slf4j.Slf4j; 12import org.springframework.context.ApplicationEventPublisher; 13import org.springframework.stereotype.Service; 14import org.springframework.transaction.annotation.Transactional; 15 16@Service 17@Slf4j 18@RequiredArgsConstructor 19public class OrderService { 20 21 final ApplicationEventPublisher applicationEventPublisher; 22 23 /** 24 * @ApplicationModuleListener need a transactional boundary else won\u0026#39;t run. 
25 */ 26 @Transactional 27 public Order placeOrder(Order order) { 28 for (String item : order.getItems()) { 29 OrderEvent orderEvent = OrderEvent.builder() 30 .orderId(order.getOrderId()) 31 .item(item) 32 .orderDate(order.getOrderDate()) 33 .build(); 34 log.info(\u0026#34;Publishing Order: {}\u0026#34;, orderEvent); 35 applicationEventPublisher.publishEvent(orderEvent); 36 } 37 38 PrimeReward coupon1 = PrimeReward.builder() 39 .id(UUID.randomUUID()) 40 .couponCode(\u0026#34;coupon-code-\u0026#34; + UUID.randomUUID()) 41 .build(); 42 SeasonReward coupon2 = SeasonReward.builder() 43 .id(UUID.randomUUID()) 44 .couponCode(\u0026#34;coupon-code-\u0026#34; + UUID.randomUUID()) 45 .build(); 46 CustomEvent\u0026lt;PrimeReward\u0026gt; customEvent1 = new CustomEvent(this, coupon1); 47 CustomEvent\u0026lt;SeasonReward\u0026gt; customEvent2 = new CustomEvent(this, coupon2); 48 log.info(\u0026#34;Publishing CustomEvent: {}\u0026#34;, customEvent1); 49 applicationEventPublisher.publishEvent(customEvent1); 50 log.info(\u0026#34;Publishing CustomEvent: {}\u0026#34;, customEvent2); 51 applicationEventPublisher.publishEvent(customEvent2); 52 return order; 53 } 54} 1package com.demo.project73.reward.internal.listener; 2 3import com.demo.project73.common.CustomEvent; 4import com.demo.project73.common.PrimeReward; 5import com.demo.project73.common.SeasonReward; 6import lombok.SneakyThrows; 7import lombok.extern.slf4j.Slf4j; 8import org.springframework.context.event.EventListener; 9import org.springframework.scheduling.annotation.Async; 10import org.springframework.stereotype.Component; 11import org.springframework.transaction.event.TransactionPhase; 12import org.springframework.transaction.event.TransactionalEventListener; 13 14@Component 15@Slf4j 16public class RewardListener { 17 /** 18 * Processes the custom event 19 */ 20 @Async 21 @SneakyThrows 22 @EventListener 23 public void processEvent(CustomEvent myEvent) { 24 log.info(\u0026#34;Processing CustomEvent {}\u0026#34;, 
myEvent); 25 if (myEvent.getEntity() instanceof PrimeReward) { 26 log.info(\u0026#34;PrimeReward Event: {}\u0026#34;, ((PrimeReward) myEvent.getEntity()).getCouponCode()); 27 } 28 if (myEvent.getEntity() instanceof SeasonReward) { 29 log.info(\u0026#34;SeasonReward Event: {}\u0026#34;, ((SeasonReward) myEvent.getEntity()).getCouponCode()); 30 } 31 } 32 33 /** 34 * AFTER_COMMIT: The event will be handled when the transaction gets committed successfully. 35 * AFTER_COMPLETION: The event will be handled when the transaction commits or is rolled back. 36 * AFTER_ROLLBACK: The event will be handled after the transaction has rolled back. 37 * BEFORE_COMMIT: The event will be handled before the transaction commit. 38 */ 39 @TransactionalEventListener(phase = TransactionPhase.AFTER_COMMIT) 40 void afterAuditEventProcessed(CustomEvent myEvent) { 41 log.info(\u0026#34;After CustomEvent processed: {}\u0026#34;, myEvent); 42 } 43} Setup 1# Project 73 2 3Spring Events 4 5[https://gitorko.github.io/spring-events/](https://gitorko.github.io/spring-events/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 21.0.3 2024-04-16 LTS 14``` 15 16### Modulith Documentation 17 18```bash 19brew install graphviz 20``` 21 22### Postgres DB 23 24```bash 25docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:14 26docker ps 27docker exec -it pg-container psql -U postgres -W postgres 28CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 29CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 30grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 31 32docker stop pg-container 33docker start pg-container 34``` 35 36### RabbitMQ 37 38Run the docker command to start a rabbitmq instance 39 40```bash 41docker run -d --hostname my-rabbit --name my-rabbit -e RABBITMQ_DEFAULT_USER=guest -e RABBITMQ_DEFAULT_PASS=guest -p 8085:15672 -p 5672:5672 rabbitmq:3-management 
42``` 43 44Open the rabbitmq console 45 46[http://localhost:8085](http://localhost:8085) 47 48``` 49user:guest 50pwd: guest 51``` 52 53### Dev 54 55To run the code. 56 57```bash 58./gradlew clean build 59./gradlew bootRun 60``` References https://spring.io/blog/2015/02/11/better-application-events-in-spring-framework-4-2\nhttps://spring.io/projects/spring-modulith\nhttps://github.com/xmolecules/jmolecules\nhttps://www.youtube.com/watch?v=Pae2D4XcEIg\n","link":"https://gitorko.github.io/post/spring-modulith/","section":"post","tags":["spring","spring-modulith","events","postgres","liquibase"],"title":"Spring Modulith - Events"},{"body":"","link":"https://gitorko.github.io/tags/spring-modulith/","section":"tags","tags":null,"title":"Spring-Modulith"},{"body":"","link":"https://gitorko.github.io/tags/jdbc/","section":"tags","tags":null,"title":"Jdbc"},{"body":"","link":"https://gitorko.github.io/categories/jpa/","section":"categories","tags":null,"title":"JPA"},{"body":"","link":"https://gitorko.github.io/tags/multi-tenancy/","section":"tags","tags":null,"title":"Multi-Tenancy"},{"body":"Spring JPA implementation with multi-tenancy and routing.\nGithub: https://github.com/gitorko/project101\nPostgres Multi-Tenancy Multi-tenancy is an architectural pattern that allows you to isolate customers even if they are using the same hardware or software components.\nCatalog-based - Each region gets its own database Schema-based - Single database but different schema for each region Table-based - Single database, single table but a column identifies the region. There are 2 approaches to implement multi-tenancy\nAbstractMultiTenantConnectionProvider - Handles Hibernate session factory connections for different tenants. AbstractRoutingDataSource - Handles other aspects of data source routing in the application, such as switching data sources for non-Hibernate use cases. 
Code 1package com.demo.project101.config; 2 3import java.util.HashMap; 4import java.util.Map; 5import javax.sql.DataSource; 6 7import com.zaxxer.hikari.HikariConfig; 8import com.zaxxer.hikari.HikariDataSource; 9import org.springframework.beans.factory.annotation.Qualifier; 10import org.springframework.context.annotation.Bean; 11import org.springframework.context.annotation.Configuration; 12import org.springframework.core.env.Environment; 13import org.springframework.orm.jpa.JpaTransactionManager; 14import org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean; 15import org.springframework.orm.jpa.vendor.HibernateJpaVendorAdapter; 16import org.springframework.transaction.PlatformTransactionManager; 17 18@Configuration 19public class DataSourceConfig { 20 21 @Bean(name = \u0026#34;defaultDataSource\u0026#34;) 22 public DataSource defaultDataSource(Environment env) { 23 HikariConfig config = new HikariConfig(); 24 config.setJdbcUrl(env.getProperty(\u0026#34;spring.datasource.url\u0026#34;)); 25 config.setUsername(env.getProperty(\u0026#34;spring.datasource.username\u0026#34;)); 26 config.setPassword(env.getProperty(\u0026#34;spring.datasource.password\u0026#34;)); 27 config.setDriverClassName(env.getProperty(\u0026#34;spring.datasource.driver-class-name\u0026#34;)); 28 config.setMaximumPoolSize(Integer.parseInt(env.getProperty(\u0026#34;spring.datasource.hikari.maximum-pool-size\u0026#34;))); 29 config.setMinimumIdle(Integer.parseInt(env.getProperty(\u0026#34;spring.datasource.hikari.minimum-idle\u0026#34;))); 30 config.setIdleTimeout(Long.parseLong(env.getProperty(\u0026#34;spring.datasource.hikari.idle-timeout\u0026#34;))); 31 config.setMaxLifetime(Long.parseLong(env.getProperty(\u0026#34;spring.datasource.hikari.max-lifetime\u0026#34;))); 32 config.setConnectionTimeout(Long.parseLong(env.getProperty(\u0026#34;spring.datasource.hikari.connection-timeout\u0026#34;))); 33 return new HikariDataSource(config); 34 } 35 36 @Bean(name = 
\u0026#34;entityManagerFactory\u0026#34;) 37 public LocalContainerEntityManagerFactoryBean primaryEntityManagerFactory( 38 @Qualifier(\u0026#34;routingDataSource\u0026#34;) DataSource routingDataSource, Environment env) { 39 HibernateJpaVendorAdapter vendorAdapter = new HibernateJpaVendorAdapter(); 40 LocalContainerEntityManagerFactoryBean factory = new LocalContainerEntityManagerFactoryBean(); 41 factory.setDataSource(routingDataSource); 42 factory.setPackagesToScan(\u0026#34;com.demo.project101.domain\u0026#34;); 43 factory.setJpaVendorAdapter(vendorAdapter); 44 factory.setJpaPropertyMap(hibernateProperties(env)); 45 return factory; 46 } 47 48 @Bean(name = \u0026#34;transactionManager\u0026#34;) 49 public PlatformTransactionManager primaryTransactionManager( 50 @Qualifier(\u0026#34;entityManagerFactory\u0026#34;) LocalContainerEntityManagerFactoryBean primaryEntityManagerFactory) { 51 return new JpaTransactionManager(primaryEntityManagerFactory.getObject()); 52 } 53 54 private Map\u0026lt;String, Object\u0026gt; hibernateProperties(Environment env) { 55 Map\u0026lt;String, Object\u0026gt; properties = new HashMap\u0026lt;\u0026gt;(); 56 properties.put(\u0026#34;hibernate.hbm2ddl.auto\u0026#34;, env.getProperty(\u0026#34;spring.jpa.properties.hibernate.hbm2ddl.auto\u0026#34;)); 57 properties.put(\u0026#34;hibernate.dialect\u0026#34;, env.getProperty(\u0026#34;spring.jpa.properties.hibernate.dialect\u0026#34;)); 58 properties.put(\u0026#34;hibernate.show_sql\u0026#34;, env.getProperty(\u0026#34;spring.jpa.properties.hibernate.show_sql\u0026#34;)); 59 return properties; 60 } 61} 62 63 1package com.demo.project101.config; 2 3import java.util.List; 4import javax.sql.DataSource; 5 6import jakarta.annotation.PostConstruct; 7import liquibase.exception.LiquibaseException; 8import liquibase.integration.spring.SpringLiquibase; 9import org.springframework.beans.factory.annotation.Autowired; 10import org.springframework.beans.factory.annotation.Qualifier; 11import 
org.springframework.beans.factory.annotation.Value; 12import org.springframework.context.annotation.Configuration; 13 14@Configuration 15public class LiquibaseConfig { 16 17 @Autowired 18 @Qualifier(\u0026#34;defaultDataSource\u0026#34;) 19 private DataSource dataSource; 20 21 @Value(\u0026#34;${app.tenants}\u0026#34;) 22 private List\u0026lt;String\u0026gt; tenants; 23 24 @PostConstruct 25 public void applyLiquibase() throws LiquibaseException { 26 for (String tenant : tenants) { 27 SpringLiquibase liquibase = new SpringLiquibase(); 28 liquibase.setDataSource(dataSource); 29 liquibase.setDefaultSchema(tenant); 30 liquibase.setChangeLog(\u0026#34;classpath:db/changelog/db.changelog.yaml\u0026#34;); 31 liquibase.afterPropertiesSet(); 32 } 33 } 34} 35 1package com.demo.project101.config; 2 3import java.util.HashMap; 4import java.util.List; 5import java.util.Map; 6import javax.sql.DataSource; 7 8import com.zaxxer.hikari.HikariConfig; 9import com.zaxxer.hikari.HikariDataSource; 10import lombok.extern.slf4j.Slf4j; 11import org.springframework.beans.factory.annotation.Qualifier; 12import org.springframework.beans.factory.annotation.Value; 13import org.springframework.context.annotation.Bean; 14import org.springframework.context.annotation.Configuration; 15import org.springframework.core.env.Environment; 16import org.springframework.jdbc.datasource.lookup.AbstractRoutingDataSource; 17 18@Configuration 19@Slf4j 20public class RoutingConfig { 21 22 @Value(\u0026#34;${app.tenants}\u0026#34;) 23 private List\u0026lt;String\u0026gt; tenants; 24 25 @Bean(name = \u0026#34;routingDataSource\u0026#34;) 26 public DataSource routingDataSource(@Qualifier(\u0026#34;defaultDataSource\u0026#34;) DataSource defaultDataSource, Environment env) { 27 Map\u0026lt;Object, Object\u0026gt; targetDataSources = new HashMap\u0026lt;\u0026gt;(); 28 for (String tenant : tenants) { 29 targetDataSources.put(tenant, createTenantDataSource(tenant, env)); 30 } 31 AbstractRoutingDataSource 
routingDataSource = new AbstractRoutingDataSource() { 32 @Override 33 protected Object determineCurrentLookupKey() { 34 return TenantContext.getCurrentTenant(); 35 } 36 }; 37 routingDataSource.setDefaultTargetDataSource(defaultDataSource); 38 routingDataSource.setTargetDataSources(targetDataSources); 39 return routingDataSource; 40 } 41 42 public DataSource createTenantDataSource(String tenant, Environment env) { 43 log.info(\u0026#34;New Connection: {}\u0026#34;, tenant); 44 HikariConfig config = new HikariConfig(); 45 config.setJdbcUrl(env.getProperty(\u0026#34;spring.datasource.url\u0026#34;) + \u0026#34;?currentSchema=\u0026#34; + tenant); 46 config.setUsername(env.getProperty(\u0026#34;spring.datasource.username\u0026#34;)); 47 config.setPassword(env.getProperty(\u0026#34;spring.datasource.password\u0026#34;)); 48 config.setDriverClassName(env.getProperty(\u0026#34;spring.datasource.driver-class-name\u0026#34;)); 49 config.setMaximumPoolSize(Integer.parseInt(env.getProperty(\u0026#34;spring.datasource.hikari.maximum-pool-size\u0026#34;))); 50 config.setMinimumIdle(Integer.parseInt(env.getProperty(\u0026#34;spring.datasource.hikari.minimum-idle\u0026#34;))); 51 config.setIdleTimeout(Long.parseLong(env.getProperty(\u0026#34;spring.datasource.hikari.idle-timeout\u0026#34;))); 52 config.setMaxLifetime(Long.parseLong(env.getProperty(\u0026#34;spring.datasource.hikari.max-lifetime\u0026#34;))); 53 config.setConnectionTimeout(Long.parseLong(env.getProperty(\u0026#34;spring.datasource.hikari.connection-timeout\u0026#34;))); 54 return new HikariDataSource(config); 55 } 56} 1package com.demo.project101.config; 2 3public class TenantContext { 4 5 private static final ThreadLocal\u0026lt;String\u0026gt; CURRENT_TENANT = new ThreadLocal\u0026lt;\u0026gt;(); 6 7 public static String getCurrentTenant() { 8 return CURRENT_TENANT.get(); 9 } 10 11 public static void setCurrentTenant(String tenant) { 12 CURRENT_TENANT.set(tenant); 13 } 14 15 public static void clear() { 16 
CURRENT_TENANT.remove(); 17 } 18} 1package com.demo.project101.controller; 2 3import java.util.List; 4 5import com.demo.project101.config.TenantContext; 6import com.demo.project101.domain.Customer; 7import com.demo.project101.service.CustomerService; 8import lombok.RequiredArgsConstructor; 9import org.springframework.web.bind.annotation.GetMapping; 10import org.springframework.web.bind.annotation.PostMapping; 11import org.springframework.web.bind.annotation.RequestBody; 12import org.springframework.web.bind.annotation.RequestHeader; 13import org.springframework.web.bind.annotation.RequestMapping; 14import org.springframework.web.bind.annotation.RestController; 15 16@RestController 17@RequestMapping(\u0026#34;/customer\u0026#34;) 18@RequiredArgsConstructor 19public class CustomerController { 20 final CustomerService customerService; 21 22 @GetMapping 23 public List\u0026lt;Customer\u0026gt; getAll(@RequestHeader(\u0026#34;X-TenantID\u0026#34;) String tenantId) { 24 try { 25 TenantContext.setCurrentTenant(tenantId); 26 return customerService.findAll(); 27 } finally { 28 TenantContext.clear(); 29 } 30 } 31 32 @PostMapping 33 public Customer saveCustomer(@RequestHeader(\u0026#34;X-TenantID\u0026#34;) String tenantId, @RequestBody Customer customer) { 34 try { 35 TenantContext.setCurrentTenant(tenantId); 36 return customerService.save(customer); 37 } finally { 38 TenantContext.clear(); 39 } 40 } 41} Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project101 2 3Spring Boot \u0026amp; Postgres - Multi-tenancy \u0026amp; Routing 4 5### Version 6 7Check version 8 9```bash 10$java --version 11openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 12``` 13 14### Postgres DB 15 16``` 17docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:14 18docker ps 19docker exec -it pg-container psql -U postgres -W postgres 20CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 21CREATE DATABASE 
\u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 22grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 23``` 24 25Schema 26 27```bash 28 29CREATE SCHEMA america; 30CREATE SCHEMA asia; 31 32select nspname as \u0026#34;Schema\u0026#34; 33from pg_catalog.pg_namespace 34where nspname not like \u0026#39;pg_%\u0026#39;; 35``` 36 37```bash 38docker stop pg-container 39docker start pg-container 40``` 41 42### Dev 43 44To run the backend in dev mode. 45 46```bash 47./gradlew clean build 48./gradlew bootRun 49``` References https://spring.io\nhttps://vladmihalcea.com/database-multitenancy/\nhttps://vladmihalcea.com/hibernate-database-schema-multitenancy/\nhttps://vladmihalcea.com/read-write-read-only-transaction-routing-spring/\n","link":"https://gitorko.github.io/post/multi-tenancy-routing/","section":"post","tags":["jdbc","multi-tenancy","liquibase"],"title":"Spring Boot \u0026 Postgres - Multi-Tenancy \u0026 Routing"},{"body":"","link":"https://gitorko.github.io/categories/postgres/","section":"categories","tags":null,"title":"Postgres"},{"body":"Spring boot application with postgres text search implementation\nGithub: https://github.com/gitorko/project103\nText Search Using like keyword for search is not efficient for text search. There is no ranking and no indexes can be used.\n1select * from customer where description like \u0026#39;%play%\u0026#39;; Elastic search can also be used to search text for large scale. 
For simpler small scale text search you can use postgres and leverage existing database.\nto_tsvector - Will remove stop words, find lexical words, adds positions to_tsquery - Will search the tsvector gin - generalized inverted index will be created to search.\nSQL queries\n1select description::tsvector 2from customer; 3 4select to_tsvector(description) 5from customer; 6 7select to_tsquery(\u0026#39;Loves\u0026#39;) 8from customer; 9 10select websearch_to_tsquery(\u0026#39;Loves and Skating\u0026#39;) 11from customer; 12 13select to_tsvector(description) @@ websearch_to_tsquery(\u0026#39;Loves and Skating\u0026#39;) 14from customer; 15 16select * 17from customer 18where to_tsvector(name || \u0026#39; \u0026#39; || coalesce(description, \u0026#39;\u0026#39;)) @@ websearch_to_tsquery(\u0026#39;Loves and Skating\u0026#39;); 19 20select *, ts_rank(to_tsvector(name || \u0026#39; \u0026#39; || coalesce(description, \u0026#39;\u0026#39;)), websearch_to_tsquery(\u0026#39;Loves and Skating\u0026#39;)) as rank 21from customer 22where to_tsvector(name || \u0026#39; \u0026#39; || coalesce(description, \u0026#39;\u0026#39;)) @@ websearch_to_tsquery(\u0026#39;Loves and Skating\u0026#39;) 23order by rank desc; Code 1CREATE TABLE customer ( 2 id bigserial PRIMARY KEY, 3 name varchar(255) UNIQUE NOT NULL, 4 description text, 5 tsv tsvector generated always as ( to_tsvector(\u0026#39;english\u0026#39;::regconfig, name || \u0026#39; \u0026#39; || coalesce(description, \u0026#39;\u0026#39;)) ) stored 6); 1package com.demo.project103.repository; 2 3import java.util.List; 4 5import com.demo.project103.domain.Customer; 6import org.springframework.data.jpa.repository.JpaRepository; 7import org.springframework.data.jpa.repository.Query; 8import org.springframework.data.repository.query.Param; 9import org.springframework.stereotype.Repository; 10 11@Repository 12public interface CustomerRepository extends JpaRepository\u0026lt;Customer, Long\u0026gt; { 13 14 @Query(value = 
\u0026#34;\u0026#34;\u0026#34; 15 select * from customer where 16 tsv @@ websearch_to_tsquery(:search); 17 \u0026#34;\u0026#34;\u0026#34;, nativeQuery = true) 18 List\u0026lt;Customer\u0026gt; searchByText(@Param(\u0026#34;search\u0026#34;) String search); 19 20 @Query(value = \u0026#34;\u0026#34;\u0026#34; 21 select *, ts_rank(tsv, websearch_to_tsquery(:search)) as rank 22 from customer 23 where tsv @@ websearch_to_tsquery(:search) 24 order by rank desc; 25 \u0026#34;\u0026#34;\u0026#34;, nativeQuery = true) 26 List\u0026lt;Customer\u0026gt; rankSearchByText(@Param(\u0026#34;search\u0026#34;) String search); 27} Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project103 2 3Spring Boot \u0026amp; Postgres - Text Search 4 5### Version 6 7Check version 8 9```bash 10$java --version 11openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 12``` 13 14### Postgres DB 15 16``` 17docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:14 18docker ps 19docker exec -it pg-container psql -U postgres -W postgres 20CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 21CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 22grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 23``` 24 25Schema 26 27```bash 28 29CREATE SCHEMA america; 30CREATE SCHEMA asia; 31 32select nspname as \u0026#34;Schema\u0026#34; 33from pg_catalog.pg_namespace 34where nspname not like \u0026#39;pg_%\u0026#39;; 35``` 36 37```bash 38docker stop pg-container 39docker start pg-container 40``` 41 42### Dev 43 44To run the backend in dev mode. 
45 46```bash 47./gradlew clean build 48./gradlew bootRun 49``` References https://www.postgresql.org/docs/current/textsearch.html\n","link":"https://gitorko.github.io/post/spring-postgres-text-search/","section":"post","tags":["jdbc","text-search","liquibase"],"title":"Spring Boot \u0026 Postgres - Text Search"},{"body":"","link":"https://gitorko.github.io/tags/text-search/","section":"tags","tags":null,"title":"Text-Search"},{"body":"","link":"https://gitorko.github.io/tags/cqrs/","section":"tags","tags":null,"title":"Cqrs"},{"body":"","link":"https://gitorko.github.io/categories/cqrs/","section":"categories","tags":null,"title":"CQRS"},{"body":"","link":"https://gitorko.github.io/tags/leader-follower/","section":"tags","tags":null,"title":"Leader-Follower"},{"body":"","link":"https://gitorko.github.io/categories/liquibase/","section":"categories","tags":null,"title":"Liquibase"},{"body":"","link":"https://gitorko.github.io/tags/multi-database/","section":"tags","tags":null,"title":"Multi-Database"},{"body":"Spring boot implementation of CQRS pattern\nGithub: https://github.com/gitorko/project99\nMain Topic CQRS (Command and Query Responsibility Segregation) a pattern that separates read and update operations for different data store. This maximizes application performance, scalability, and security. We will start 2 database servers where writes goto the primary database and reads are done on the secondary database. Replication happens from primary db to secondary db. 
This is an AP model (CAP Theorem) as replication will result in eventual consistency.\nCode 1package com.demo.project99.config; 2 3import java.util.HashMap; 4import java.util.Map; 5import javax.sql.DataSource; 6 7import com.zaxxer.hikari.HikariConfig; 8import com.zaxxer.hikari.HikariDataSource; 9import liquibase.integration.spring.SpringLiquibase; 10import org.springframework.beans.factory.annotation.Qualifier; 11import org.springframework.context.annotation.Bean; 12import org.springframework.context.annotation.Configuration; 13import org.springframework.core.env.Environment; 14import org.springframework.data.jpa.repository.config.EnableJpaRepositories; 15import org.springframework.orm.jpa.JpaTransactionManager; 16import org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean; 17import org.springframework.orm.jpa.vendor.HibernateJpaVendorAdapter; 18import org.springframework.transaction.PlatformTransactionManager; 19import org.springframework.transaction.annotation.EnableTransactionManagement; 20 21@Configuration 22@EnableTransactionManagement 23@EnableJpaRepositories( 24 basePackages = \u0026#34;com.demo.project99.repository.primary\u0026#34;, 25 entityManagerFactoryRef = \u0026#34;primaryEntityManagerFactory\u0026#34;, 26 transactionManagerRef = \u0026#34;primaryTransactionManager\u0026#34; 27) 28public class PrimaryDataSourceConfig { 29 30 @Bean(name = \u0026#34;primaryDataSource\u0026#34;) 31 public DataSource primaryDataSource(Environment env) { 32 HikariConfig config = new HikariConfig(); 33 config.setJdbcUrl(env.getProperty(\u0026#34;spring.datasource.primary.url\u0026#34;)); 34 config.setUsername(env.getProperty(\u0026#34;spring.datasource.primary.username\u0026#34;)); 35 config.setPassword(env.getProperty(\u0026#34;spring.datasource.primary.password\u0026#34;)); 36 config.setDriverClassName(env.getProperty(\u0026#34;spring.datasource.primary.driver-class-name\u0026#34;)); 37 
config.setMaximumPoolSize(Integer.parseInt(env.getProperty(\u0026#34;spring.datasource.primary.hikari.maximum-pool-size\u0026#34;))); 38 config.setMinimumIdle(Integer.parseInt(env.getProperty(\u0026#34;spring.datasource.primary.hikari.minimum-idle\u0026#34;))); 39 config.setIdleTimeout(Long.parseLong(env.getProperty(\u0026#34;spring.datasource.primary.hikari.idle-timeout\u0026#34;))); 40 config.setMaxLifetime(Long.parseLong(env.getProperty(\u0026#34;spring.datasource.primary.hikari.max-lifetime\u0026#34;))); 41 config.setConnectionTimeout(Long.parseLong(env.getProperty(\u0026#34;spring.datasource.primary.hikari.connection-timeout\u0026#34;))); 42 return new HikariDataSource(config); 43 } 44 45 @Bean(name = \u0026#34;primaryEntityManagerFactory\u0026#34;) 46 public LocalContainerEntityManagerFactoryBean primaryEntityManagerFactory( 47 @Qualifier(\u0026#34;primaryDataSource\u0026#34;) DataSource primaryDataSource, Environment env) { 48 HibernateJpaVendorAdapter vendorAdapter = new HibernateJpaVendorAdapter(); 49 LocalContainerEntityManagerFactoryBean factory = new LocalContainerEntityManagerFactoryBean(); 50 factory.setDataSource(primaryDataSource); 51 factory.setPackagesToScan(\u0026#34;com.demo.project99.domain.primary\u0026#34;); 52 factory.setJpaVendorAdapter(vendorAdapter); 53 factory.setJpaPropertyMap(hibernateProperties(env)); 54 return factory; 55 } 56 57 @Bean(name = \u0026#34;primaryTransactionManager\u0026#34;) 58 public PlatformTransactionManager primaryTransactionManager( 59 @Qualifier(\u0026#34;primaryEntityManagerFactory\u0026#34;) LocalContainerEntityManagerFactoryBean primaryEntityManagerFactory) { 60 return new JpaTransactionManager(primaryEntityManagerFactory.getObject()); 61 } 62 63 @Bean 64 public SpringLiquibase primaryLiquibase(@Qualifier(\u0026#34;primaryDataSource\u0026#34;) DataSource primaryDataSource) { 65 SpringLiquibase liquibase = new SpringLiquibase(); 66 liquibase.setDataSource(primaryDataSource); 67 
liquibase.setChangeLog(\u0026#34;classpath:db/changelog/db.changelog-primary.yaml\u0026#34;); 68 liquibase.setContexts(\u0026#34;primary\u0026#34;); 69 return liquibase; 70 } 71 72 private Map\u0026lt;String, Object\u0026gt; hibernateProperties(Environment env) { 73 Map\u0026lt;String, Object\u0026gt; properties = new HashMap\u0026lt;\u0026gt;(); 74 properties.put(\u0026#34;hibernate.hbm2ddl.auto\u0026#34;, env.getProperty(\u0026#34;spring.jpa.properties.hibernate.hbm2ddl.auto\u0026#34;)); 75 properties.put(\u0026#34;hibernate.dialect\u0026#34;, env.getProperty(\u0026#34;spring.jpa.properties.hibernate.dialect\u0026#34;)); 76 properties.put(\u0026#34;hibernate.show_sql\u0026#34;, env.getProperty(\u0026#34;spring.jpa.properties.hibernate.show_sql\u0026#34;)); 77 return properties; 78 } 79} 80 81 1package com.demo.project99.config; 2 3import java.util.HashMap; 4import java.util.Map; 5import javax.sql.DataSource; 6 7import com.zaxxer.hikari.HikariConfig; 8import com.zaxxer.hikari.HikariDataSource; 9import liquibase.integration.spring.SpringLiquibase; 10import org.springframework.beans.factory.annotation.Qualifier; 11import org.springframework.boot.jdbc.DataSourceBuilder; 12import org.springframework.context.annotation.Bean; 13import org.springframework.context.annotation.Configuration; 14import org.springframework.core.env.Environment; 15import org.springframework.data.jpa.repository.config.EnableJpaRepositories; 16import org.springframework.orm.jpa.JpaTransactionManager; 17import org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean; 18import org.springframework.orm.jpa.vendor.HibernateJpaVendorAdapter; 19import org.springframework.transaction.PlatformTransactionManager; 20import org.springframework.transaction.annotation.EnableTransactionManagement; 21 22@Configuration 23@EnableTransactionManagement 24@EnableJpaRepositories( 25 basePackages = \u0026#34;com.demo.project99.repository.secondary\u0026#34;, 26 entityManagerFactoryRef = 
\u0026#34;secondaryEntityManagerFactory\u0026#34;, 27 transactionManagerRef = \u0026#34;secondaryTransactionManager\u0026#34; 28) 29public class SecondaryDataSourceConfig { 30 31 @Bean(name = \u0026#34;secondaryDataSource\u0026#34;) 32 public DataSource secondaryDataSource(Environment env) { 33 HikariConfig config = new HikariConfig(); 34 config.setJdbcUrl(env.getProperty(\u0026#34;spring.datasource.secondary.url\u0026#34;)); 35 config.setUsername(env.getProperty(\u0026#34;spring.datasource.secondary.username\u0026#34;)); 36 config.setPassword(env.getProperty(\u0026#34;spring.datasource.secondary.password\u0026#34;)); 37 config.setDriverClassName(env.getProperty(\u0026#34;spring.datasource.secondary.driver-class-name\u0026#34;)); 38 config.setMaximumPoolSize(Integer.parseInt(env.getProperty(\u0026#34;spring.datasource.secondary.hikari.maximum-pool-size\u0026#34;))); 39 config.setMinimumIdle(Integer.parseInt(env.getProperty(\u0026#34;spring.datasource.secondary.hikari.minimum-idle\u0026#34;))); 40 config.setIdleTimeout(Long.parseLong(env.getProperty(\u0026#34;spring.datasource.secondary.hikari.idle-timeout\u0026#34;))); 41 config.setMaxLifetime(Long.parseLong(env.getProperty(\u0026#34;spring.datasource.secondary.hikari.max-lifetime\u0026#34;))); 42 config.setConnectionTimeout(Long.parseLong(env.getProperty(\u0026#34;spring.datasource.secondary.hikari.connection-timeout\u0026#34;))); 43 return new HikariDataSource(config); 44 } 45 46 @Bean(name = \u0026#34;secondaryEntityManagerFactory\u0026#34;) 47 public LocalContainerEntityManagerFactoryBean secondaryEntityManagerFactory( 48 @Qualifier(\u0026#34;secondaryDataSource\u0026#34;) DataSource secondaryDataSource, Environment env) { 49 HibernateJpaVendorAdapter vendorAdapter = new HibernateJpaVendorAdapter(); 50 LocalContainerEntityManagerFactoryBean factory = new LocalContainerEntityManagerFactoryBean(); 51 factory.setDataSource(secondaryDataSource); 52 
factory.setPackagesToScan(\u0026#34;com.demo.project99.domain.secondary\u0026#34;); 53 factory.setJpaVendorAdapter(vendorAdapter); 54 factory.setJpaPropertyMap(hibernateProperties(env)); 55 return factory; 56 } 57 58 @Bean(name = \u0026#34;secondaryTransactionManager\u0026#34;) 59 public PlatformTransactionManager secondaryTransactionManager( 60 @Qualifier(\u0026#34;secondaryEntityManagerFactory\u0026#34;) LocalContainerEntityManagerFactoryBean secondaryEntityManagerFactory) { 61 return new JpaTransactionManager(secondaryEntityManagerFactory.getObject()); 62 } 63 64 @Bean 65 public SpringLiquibase secondaryLiquibase(@Qualifier(\u0026#34;secondaryDataSource\u0026#34;) DataSource secondaryDataSource) { 66 SpringLiquibase liquibase = new SpringLiquibase(); 67 liquibase.setDataSource(secondaryDataSource); 68 liquibase.setChangeLog(\u0026#34;classpath:db/changelog/db.changelog-secondary.yaml\u0026#34;); 69 liquibase.setContexts(\u0026#34;secondary\u0026#34;); 70 return liquibase; 71 } 72 73 private Map\u0026lt;String, Object\u0026gt; hibernateProperties(Environment env) { 74 Map\u0026lt;String, Object\u0026gt; properties = new HashMap\u0026lt;\u0026gt;(); 75 properties.put(\u0026#34;hibernate.hbm2ddl.auto\u0026#34;, env.getProperty(\u0026#34;spring.jpa.properties.hibernate.hbm2ddl.auto\u0026#34;)); 76 properties.put(\u0026#34;hibernate.dialect\u0026#34;, env.getProperty(\u0026#34;spring.jpa.properties.hibernate.dialect\u0026#34;)); 77 properties.put(\u0026#34;hibernate.show_sql\u0026#34;, env.getProperty(\u0026#34;spring.jpa.properties.hibernate.show_sql\u0026#34;)); 78 return properties; 79 } 80 81 /** 82 * Non connection pool approach to get data source. 
83 */ 84 private DataSource createDataSource(Environment env) { 85 return DataSourceBuilder.create() 86 .url(env.getProperty(\u0026#34;spring.datasource.secondary.url\u0026#34;)) 87 .username(env.getProperty(\u0026#34;spring.datasource.secondary.username\u0026#34;)) 88 .password(env.getProperty(\u0026#34;spring.datasource.secondary.password\u0026#34;)) 89 .driverClassName(env.getProperty(\u0026#34;spring.datasource.secondary.driver-class-name\u0026#34;)) 90 .build(); 91 92 } 93} 1package com.demo.project99.service; 2 3import java.util.List; 4 5import com.demo.project99.domain.primary.EmployeeWrite; 6import com.demo.project99.domain.secondary.EmployeeRead; 7import com.demo.project99.repository.primary.PrimaryEmployeeRepository; 8import com.demo.project99.repository.secondary.SecondaryEmployeeRepository; 9import lombok.RequiredArgsConstructor; 10import org.springframework.stereotype.Service; 11 12@Service 13@RequiredArgsConstructor 14public class EmployeeService { 15 16 final PrimaryEmployeeRepository primaryEmployeeRepository; 17 18 final SecondaryEmployeeRepository secondaryEmployeeRepository; 19 20 public EmployeeWrite saveEmployee(EmployeeWrite employeeWrite) { 21 return primaryEmployeeRepository.save(employeeWrite); 22 } 23 24 public List\u0026lt;EmployeeRead\u0026gt; getAllEmployees() { 25 return secondaryEmployeeRepository.findAll(); 26 } 27} Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project 05 2 3Spring Boot Postgres - CQRS (Multiple Database) 4 5[https://gitorko.github.io/post/distributed-locking-postgres](https://gitorko.github.io/post/distributed-locking-postgres) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 21.0.3 2024-04-16 LTS 14``` 15 16### Postgres DB 17 18Start 2 Database Servers 19 20```bash 21docker-compose -f docker/docker-compose.yaml up -d 22``` 23 24Enable replication after liquibase creates the tables, run this only after spring boot application is started. 
25 26```bash 27docker-compose -f docker/enable-publication.yaml up -d 28docker-compose -f docker/enable-subscription.yaml up -d 29``` 30 31Command to verify replication 32 33```bash 34docker exec -it pg-source psql -U test -d source_db -c \u0026#34;SELECT * FROM pg_roles WHERE rolname = \u0026#39;replicator\u0026#39;;\u0026#34; 35docker exec -it pg-source psql -U test -d source_db -c \u0026#34;SELECT * FROM pg_publication;\u0026#34; 36docker exec -it pg-target psql -U test -d target_db -c \u0026#34;SELECT * FROM pg_subscription;\u0026#34; 37 38docker exec -it pg-source psql -U test -d source_db -c \u0026#34;SELECT * FROM pg_replication_slots;\u0026#34; 39docker exec -it pg-target psql -U test -d target_db -c \u0026#34;SELECT * FROM pg_stat_subscription;\u0026#34; 40 41docker exec -it pg-source psql -U test -d source_db -c \u0026#34;SELECT * FROM pg_create_logical_replication_slot(\u0026#39;employee_slot\u0026#39;, \u0026#39;pgoutput\u0026#39;);\u0026#34; 42``` 43 44Clean up 45 46```bash 47docker-compose -f docker/docker-compose.yaml down --rmi all --remove-orphans --volumes 48``` 49 50### Dev 51 52To run the backend in dev mode. 53 54```bash 55./gradlew clean build 56./gradlew bootRun 57``` References https://spring.io\n","link":"https://gitorko.github.io/post/spring-postgres-cqrs/","section":"post","tags":["jdbc","webflux","cqrs","multi-database","liquibase","leader-follower"],"title":"Spring Boot \u0026 Postgres - CQRS (Multiple Database)"},{"body":"","link":"https://gitorko.github.io/tags/webflux/","section":"tags","tags":null,"title":"Webflux"},{"body":"","link":"https://gitorko.github.io/categories/caching/","section":"categories","tags":null,"title":"Caching"},{"body":"","link":"https://gitorko.github.io/tags/ehcache/","section":"tags","tags":null,"title":"Ehcache"},{"body":"Spring Boot 3 with EhCache 3\nGithub: https://github.com/gitorko/project98\nEhCache EhCache is an open-source cache library. 
Ehcache version 3 provides an implementation of a JSR-107 cache manager. It supports cache in memory and disk, It supports eviction policies such as LRU, LFU, FIFO. Ehcache uses Last Recently Used (LRU) eviction strategy for memory \u0026amp; Last Frequently Used (LFU) as the eviction strategy for disk store.\nCaching HashMap vs Cache\nDisadvantage of using hashmap over cache is that hashmap can cause memory overflow without eviction \u0026amp; doesn't support write to disk.\nEhcache will only evict elements when putting elements and your cache is above threshold. Otherwise, accessing those expired elements will result in them being expired (and removed from the Cache). There is no thread that collects and removes expired elements from the Cache in the background.\nTypes of store\nOn-Heap Store - stores cache entries in Java heap memory Off-Heap Store - primary memory (RAM) to store cache entries, cache entries will be moved to the on-heap memory automatically before they can be used. Disk Store - uses a hard disk to store cache entries. SSD type disk would perform better. Clustered Store - stores cache entries on the remote server Memory areas supported by Ehcache\nOn-Heap Store: Uses the Java heap memory to store cache entries and shares the memory with the application. The cache is also scanned by the garbage collection. This memory is very fast, but also very limited. Off-Heap Store: Uses the RAM to store cache entries. This memory is not subject to garbage collection. Still quite fast memory, but slower than the on-heap memory, because the cache entries have to be moved to the on-heap memory before they can be used. Disk Store: Uses the hard disk to store cache entries. Much slower than RAM. It is recommended to use a dedicated SSD that is only used for caching. Caching Strategies\nRead heavy caching strategies\nRead-Cache-aside - Application queries the cache. If the data is found, it returns the data directly. 
If not it fetches the data from the SoR, stores it into the cache, and then returns. Read-Through - Application queries the cache, cache service queries the SoR if not present and updates the cache and returns. Write heavy caching strategies\nWrite-Around - Application writes to db and to the cache. Write-Behind / Write-Back - Application writes to cache. Cache is pushed to SoR after some delay periodically. Write-through - Application writes to cache, cache service immediately writes to SoR. Spring Caching @Cacheable vs @CachePut\n@Cacheable will skip running the method, whereas @CachePut will actually run the method and then put its results in the cache.\nYou can also use CacheEventListener to track events like CREATED, UPDATED, EXPIRED, REMOVED.\nEhcache uses Last Recently Used (LRU) as the default eviction strategy for the memory stores when the cache is full. If a disk store is used and this is full it uses Last Frequently Used (LFU) as the eviction strategy.\nYou can enable spring actuator and look at the cache metrics\nThe @CacheConfig annotation allows us to define certain cache configurations at the class level. 
This is useful if certain cache settings are common for all methods.\n1@CacheConfig(cacheNames = \u0026#34;customerCache\u0026#34;) Code 1package com.demo.project98.config; 2 3import static org.ehcache.config.builders.CacheEventListenerConfigurationBuilder.newEventListenerConfiguration; 4import static org.ehcache.event.EventType.CREATED; 5import static org.ehcache.event.EventType.EXPIRED; 6import static org.ehcache.event.EventType.REMOVED; 7import static org.ehcache.event.EventType.UPDATED; 8 9import java.math.BigDecimal; 10import java.time.Duration; 11import java.util.Arrays; 12import javax.cache.CacheManager; 13import javax.cache.Caching; 14import javax.cache.spi.CachingProvider; 15 16import com.demo.project98.domain.Country; 17import com.demo.project98.domain.Customer; 18import com.demo.project98.listener.CountryCacheListener; 19import lombok.RequiredArgsConstructor; 20import org.ehcache.config.CacheConfiguration; 21import org.ehcache.config.builders.CacheConfigurationBuilder; 22import org.ehcache.config.builders.ExpiryPolicyBuilder; 23import org.ehcache.config.builders.ResourcePoolsBuilder; 24import org.ehcache.config.units.MemoryUnit; 25import org.ehcache.impl.config.event.DefaultCacheEventListenerConfiguration; 26import org.ehcache.jsr107.Eh107Configuration; 27import org.springframework.cache.annotation.EnableCaching; 28import org.springframework.cache.concurrent.ConcurrentMapCache; 29import org.springframework.cache.support.SimpleCacheManager; 30import org.springframework.context.annotation.Bean; 31import org.springframework.context.annotation.Configuration; 32 33@Configuration 34@EnableCaching 35@RequiredArgsConstructor 36public class CacheConfig { 37 38 private final CountryCacheListener listener; 39 40 @Bean 41 public CacheManager echCacheManager() { 42 CachingProvider cachingProvider = Caching.getCachingProvider(); 43 CacheManager cacheManager = cachingProvider.getCacheManager(); 44 cacheManager.createCache(\u0026#34;customerCache\u0026#34;, 
customerCacheConfig()); 45 cacheManager.createCache(\u0026#34;countryCache\u0026#34;, countryCacheConfig()); 46 cacheManager.createCache(\u0026#34;squareCache\u0026#34;, squareCacheConfig()); 47 return cacheManager; 48 } 49 50 private javax.cache.configuration.Configuration\u0026lt;Long, Customer\u0026gt; customerCacheConfig() { 51 CacheConfiguration\u0026lt;Long, Customer\u0026gt; cacheConfig = CacheConfigurationBuilder 52 .newCacheConfigurationBuilder(Long.class, Customer.class, 53 ResourcePoolsBuilder.newResourcePoolsBuilder() 54 .heap(10) 55 .offheap(10, MemoryUnit.MB) 56 .build()) 57 .withExpiry(ExpiryPolicyBuilder.timeToIdleExpiration(Duration.ofSeconds(10))) 58 .build(); 59 javax.cache.configuration.Configuration\u0026lt;Long, Customer\u0026gt; configuration = Eh107Configuration.fromEhcacheCacheConfiguration(cacheConfig); 60 return configuration; 61 } 62 63 private javax.cache.configuration.Configuration\u0026lt;String, Country\u0026gt; countryCacheConfig() { 64 CacheConfiguration\u0026lt;String, Country\u0026gt; cacheConfig = CacheConfigurationBuilder 65 .newCacheConfigurationBuilder(String.class, Country.class, 66 ResourcePoolsBuilder.newResourcePoolsBuilder() 67 .heap(10) 68 .offheap(10, MemoryUnit.MB) 69 .build()) 70 .withExpiry(ExpiryPolicyBuilder.timeToIdleExpiration(Duration.ofSeconds(10))) 71 .withService(getCacheEventListener()) 72 .build(); 73 javax.cache.configuration.Configuration\u0026lt;String, Country\u0026gt; configuration = Eh107Configuration.fromEhcacheCacheConfiguration(cacheConfig); 74 return configuration; 75 } 76 77 private javax.cache.configuration.Configuration\u0026lt;Long, BigDecimal\u0026gt; squareCacheConfig() { 78 CacheConfiguration\u0026lt;Long, BigDecimal\u0026gt; cacheConfig = CacheConfigurationBuilder 79 .newCacheConfigurationBuilder(Long.class, BigDecimal.class, 80 ResourcePoolsBuilder.newResourcePoolsBuilder() 81 .heap(10) 82 .offheap(10, MemoryUnit.MB) 83 .build()) 84 .build(); 85 
javax.cache.configuration.Configuration\u0026lt;Long, BigDecimal\u0026gt; configuration = Eh107Configuration.fromEhcacheCacheConfiguration(cacheConfig); 86 return configuration; 87 } 88 89 private DefaultCacheEventListenerConfiguration getCacheEventListener() { 90 return newEventListenerConfiguration(listener, CREATED, UPDATED, EXPIRED, REMOVED) 91 .asynchronous() 92 .unordered() 93 .build(); 94 } 95 96 /** 97 * Use when no configurations. 98 */ 99 public SimpleCacheManager simpleEhCacheManager() { 100 SimpleCacheManager cacheManager = new SimpleCacheManager(); 101 cacheManager.setCaches(Arrays.asList(new ConcurrentMapCache(\u0026#34;customerCache\u0026#34;), new ConcurrentMapCache(\u0026#34;countryCache\u0026#34;))); 102 return cacheManager; 103 } 104 105} 1package com.demo.project98.listener; 2 3import com.demo.project98.domain.Country; 4import lombok.extern.slf4j.Slf4j; 5import org.ehcache.event.CacheEvent; 6import org.ehcache.event.CacheEventListener; 7import org.springframework.stereotype.Component; 8 9@Slf4j 10@Component 11public class CountryCacheListener implements CacheEventListener\u0026lt;String, Country\u0026gt; { 12 @Override 13 public void onEvent(CacheEvent\u0026lt;? extends String, ? 
extends Country\u0026gt; cacheEvent) { 14 log.info(\u0026#34;Cache event = {}, Key = {}, Old value = {}, New value = {}\u0026#34;, cacheEvent.getType(), cacheEvent.getKey(), cacheEvent.getOldValue(), cacheEvent.getNewValue()); 15 } 16} 1package com.demo.project98.service; 2 3import javax.cache.Cache; 4import javax.cache.CacheManager; 5 6import com.demo.project98.domain.Country; 7import jakarta.annotation.PostConstruct; 8import lombok.RequiredArgsConstructor; 9import lombok.extern.slf4j.Slf4j; 10import org.springframework.stereotype.Service; 11 12@Service 13@Slf4j 14@RequiredArgsConstructor 15public class CountryService { 16 17 /** 18 * Interact directly with CacheManager 19 */ 20 private final CacheManager cacheManager; 21 private Cache\u0026lt;String, Country\u0026gt; cache; 22 23 @PostConstruct 24 public void postInit() { 25 cache = cacheManager.getCache(\u0026#34;countryCache\u0026#34;); 26 } 27 28 public Country get(String code) { 29 log.info(\u0026#34;Getting country code: {}\u0026#34;, code); 30 return cache.get(code); 31 } 32 33 public void put(Country country) { 34 log.info(\u0026#34;Adding country: {}\u0026#34;, country); 35 cache.put(country.getCode(), country); 36 } 37 38 public void evict(String code) { 39 log.info(\u0026#34;Evicting country code: {}\u0026#34;, code); 40 cache.remove(code); 41 } 42 43} 1package com.demo.project98.service; 2 3import java.util.Optional; 4 5import com.demo.project98.domain.Customer; 6import com.demo.project98.repo.CustomerRepository; 7import lombok.RequiredArgsConstructor; 8import lombok.extern.slf4j.Slf4j; 9import org.springframework.cache.annotation.CacheEvict; 10import org.springframework.cache.annotation.CachePut; 11import org.springframework.cache.annotation.Cacheable; 12import org.springframework.stereotype.Service; 13 14@Service 15@RequiredArgsConstructor 16@Slf4j 17public class CustomerService { 18 19 final CustomerRepository customerRepository; 20 21 /** 22 * If return type is Optional\u0026lt;Customer\u0026gt; it 
will hit the db each time. As the cache is not configured for Optional\u0026lt;Customer\u0026gt; 23 */ 24 @Cacheable(cacheNames = \u0026#34;customerCache\u0026#34;, key = \u0026#34;#id\u0026#34;, unless = \u0026#34;#result == null\u0026#34;) 25 public Customer getCustomerById(Long id) { 26 log.info(\u0026#34;Getting customer {} from db!\u0026#34;, id); 27 Optional\u0026lt;Customer\u0026gt; customer = customerRepository.findById(id); 28 if (customer.isPresent()) { 29 return customer.get(); 30 } else { 31 return null; 32 } 33 } 34 35 /** 36 * Don\u0026#39;t put @Cacheable here as it will load everything into cache 37 */ 38 public Iterable\u0026lt;Customer\u0026gt; getCustomers() { 39 log.info(\u0026#34;Getting all customers from db!\u0026#34;); 40 return customerRepository.findAll(); 41 } 42 43 @CachePut(cacheNames = \u0026#34;customerCache\u0026#34;, key = \u0026#34;#result.id\u0026#34;) 44 public Customer save(Customer customer) { 45 log.info(\u0026#34;Saving customer {} to db!\u0026#34;, customer); 46 return customerRepository.save(customer); 47 } 48 49 @CacheEvict(cacheNames = \u0026#34;customerCache\u0026#34;, key = \u0026#34;#id\u0026#34;) 50 public void deleteById(Long id) { 51 log.info(\u0026#34;Deleting customer {} from db!\u0026#34;, id); 52 customerRepository.deleteById(id); 53 } 54 55 /** 56 * Will evict all entries in cache 57 */ 58 @CacheEvict(cacheNames = \u0026#34;customerCache\u0026#34;, allEntries = true) 59 public void evictAll() { 60 log.info(\u0026#34;evicting all customers from cache\u0026#34;); 61 } 62} 1package com.demo.project98.service; 2 3import java.math.BigDecimal; 4 5import lombok.extern.slf4j.Slf4j; 6import org.springframework.cache.annotation.Cacheable; 7import org.springframework.stereotype.Service; 8 9@Service 10@Slf4j 11public class NumberService { 12 13 /** 14 * @Cacheable results would be stored 15 * Condition is that result will be stored only for numbers \u0026gt; 10 16 */ 17 @Cacheable(value = \u0026#34;squareCache\u0026#34;, 
key = \u0026#34;#number\u0026#34;, condition = \u0026#34;#number\u0026gt;10\u0026#34;) 18 public BigDecimal square(Long number) { 19 BigDecimal square = BigDecimal.valueOf(number).multiply(BigDecimal.valueOf(number)); 20 log.info(\u0026#34;Square of {} is {}\u0026#34;, number, square); 21 return square; 22 } 23} 24 Notice the SQL is printed each time a db call happens, if the data is cached no DB call is made.\nPostman Import the postman collection to postman\nPostman Collection\nSetup 1# Project 98 2 3Spring Boot \u0026amp; Ehcache 4 5[https://gitorko.github.io/spring-ehcache/](https://gitorko.github.io/spring-ehcache/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 21.0.3 2024-04-16 LTS 14``` 15 16### Postgres DB 17 18``` 19docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:14 20docker ps 21docker exec -it pg-container psql -U postgres -W postgres 22CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 23CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 24grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 25 26docker stop pg-container 27docker start pg-container 28``` 29 30### Dev 31 32To run the backend in dev mode. 
33 34```bash 35./gradlew clean build 36./gradlew bootRun 37``` References https://www.ehcache.org/documentation/3.0\nhttps://docs.spring.io/spring-boot/docs/2.7.2/reference/htmlsingle/#io.caching\n","link":"https://gitorko.github.io/post/spring-ehcache/","section":"post","tags":["spring","spring-boot","ehcache"],"title":"Spring - EhCache"},{"body":"","link":"https://gitorko.github.io/tags/spring-boot/","section":"tags","tags":null,"title":"Spring-Boot"},{"body":"We will look at some of the best practices to be used during development of a distributed system.\nA distributed system should always assume that things will fail and should be designed with fault tolerance (ability to deal with faults) \u0026amp; resiliency (ability to recover) in mind.\nGithub: https://github.com/gitorko/project57\nDistributed System Blocking calls Problem Your service is not responding as there are some requests that are taking very long to complete. They are waiting on IO operations. What do you do?\nInvoke this rest api that takes 60 secs to complete the job.\n1for ((i=1;i\u0026lt;=10;i++)); 2do 3 echo $i 4 curl --location \u0026#39;http://localhost:8080/api/blocking-job/60\u0026#39; \u0026amp; 5done 6 7curl --location \u0026#39;http://localhost:8080/api/time\u0026#39; Determine if CPU intensive or IO intensive task and delegate the execution to a thread pool so that the core tomcat threads are free to serve requests. The default tomcat threads are 250 and any blocking that happens will affect the whole service.\nThere 2 types of protocol/connectors a tomcat server can be configured for\nBIO (Blocking IO) - The threads are not free till the response is sent back. (one thread per connection) NIO (Non-Blocking IO) - The threads are free to serve other requests while the incoming request is waiting for IO to complete. (more connections than threads) In the BIO configuration, there are 2 types of threads\nAcceptors — To accept incoming requests and to add in a queue. 
Acceptors discard any request when the queue is full, default is 100.
Thread-local - Reduce usage as each thread will end up creating its own thread local unlike before where there are limited threads in pool, virtual threads can be many as they are cheap to create. Synchronized blocks/methods - When there is synchronized method or block used the virtual thread is pinned to a platform thread, it will not relinquish its control. This means it will hold the platform thread which can cause performance issues if there is IO happening inside the synchronized block. Use ReentrantLock instead of synchronized. Native code - When native code is used virtual threads get pinned to platform threads, it will not relinquish its control. This may be problematic if IO happens for longer time thereby blocking/holding the platform thread. Thread pools - Avoid thread pool to limit resource access, eg: A thread pool of size 10 can create more than 10 concurrent threads due to virtual threads hence use semaphore if you want to limit concurrent requests based on pool size. Spring - In Spring context use concurrency-limit to limit number of thread pool and avoid runaway virtual threads. Performance - Platform threads are better when CPU intensive tasks are executed compared to virtual threads. Virtual threads benefit only when there is IO. Context switching - When virtual threads have blocking operation they yield and JVM moves the stack to heap memory. The stack is put back only when it's time to execute the thread again. This is still cheaper than creating a new platform thread though.
1Runnable fn = () -\u0026gt; { 2 System.out.println(\u0026#34;Running in thread: \u0026#34; + Thread.currentThread().getName()); 3}; 4 5Thread.ofVirtual().name(\u0026#34;virtual-thread-1\u0026#34;).start(fn); 6Thread.ofPlatform().name(\u0026#34;platform-thread-1\u0026#34;).start(fn); 7 8new Thread(fn, \u0026#34;platform-thread-2\u0026#34;).start(); 9 10var executors = Executors.newVirtualThreadPerTaskExecutor(); 11executors.submit(() -\u0026gt; { 12 System.out.println(\u0026#34;Running in thread: \u0026#34; + Thread.currentThread().threadId()); 13}); 1spring.threads.virtual.enabled=true Since the number of virtual threads created can be unlimited to ensure max concurrent requests use\n1spring: 2 task: 3 execution: 4 simple: 5 concurrency-limit: 10 6 scheduling: 7 simple: 8 concurrency-limit: 10 Denial-of-Service (DOS) Attacks Problem Your server is receiving a lot of bad TCP connections. A bad downstream client is making bad tcp connections that doesn't do anything, valid users are getting Denial-of-Service. What do you do?\nCreate 10 telnet connections that connect to the tomcat server and then invoke the rest api to getTime which will not return anything as it will wait till the TCP connection is free.\n1for ((i=1;i\u0026lt;=10;i++)); 2do 3 echo $i 4 telnet 127.0.0.1 8080 \u0026amp; 5done 1curl --location \u0026#39;http://localhost:8080/api/time\u0026#39; The connection timeout means - If the client is not sending data after establishing the TCP handshake for 'N' seconds then close the connection. The default timeout is 2 minutes\n1server.tomcat.connection-timeout=500 Note Many developers will assume that this connection timeout actually closes the connection when a long-running task takes more than 'N' seconds. This is not true. It only closes connection if the client doesn't send anything for 'N' seconds.\nTime Limiter Problem A new team member has updated an API and introduced a bug and the function is very slow or never returns a response. 
System users are complaining of a slow system?\nAlways prefer fail-fast instead of a slow system fail-later. By failing fast the downstream consumers of your service can use circuit breaker pattern to handle the outages gracefully instead of dealing with a slow api.\nIf a function takes too long to complete it will block the tomcat thread which will further degrade the system performance. Use Resilience4j @TimeLimiter to explicitly timeout long running jobs, this way runaway functions cant impact your entire system.\nInvoke this rest api that takes 10 secs to complete the job but timeout happens in 5 sec.\n1curl --location \u0026#39;http://localhost:8080/api/timeout-job/10\u0026#39; You will see the error related to timeout\n1java.util.concurrent.TimeoutException: TimeLimiter \u0026#39;project57-tl\u0026#39; recorded a timeout exception. 2\tat io.github.resilience4j.timelimiter.TimeLimiter.createdTimeoutExceptionWithName(TimeLimiter.java:225) ~[resilience4j-timelimiter-2.2.0.jar:2.2.0] 3\tat io.github.resilience4j.timelimiter.internal.TimeLimiterImpl$Timeout.lambda$of$0(TimeLimiterImpl.java:185) ~[resilience4j-timelimiter-2.2.0.jar:2.2.0] 4\tat java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:572) ~[na:na] 5\tat java.base/java.util.concurrent.FutureTask.run(FutureTask.java:317) ~[na:na] 6\tat java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304) ~[na:na] 7\tat java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1144) ~[na:na] 8\tat java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:642) ~[na:na] 9\tat java.base/java.lang.Thread.run(Thread.java:1583) ~[na:na] Spring also uses spring.mvc.async.request-timeout that ensures REST APIs can timeout after the configurable amount of time. 
Default is 30 seconds.\n1spring: 2 mvc: 3 async: 4 request-timeout: 30000 Note Always assume the functions/api will take forever and may never complete, design system accordingly by fencing the methods.\nRequest Thread Pool \u0026amp; Connections Problem During peak traffic users are reporting slow connection / timeout when connecting to your server? How many concurrent requests can your server handle?\nThe number of tomcat threads determines how many threads can handle the incoming requests. By default, this number is 200.\n1# Applies for BIO 2server: 3 tomcat: 4 threads: 5 max: 10 6 max-connections: 10 Max number of connections the server can accept and process, for BIO (Blocking IO) tomcat the server.tomcat.threads.max is equal to server.tomcat.max-connections You can't have more connections than the threads.\nFor NIO tomcat, the number of threads can be less and the max-connections can be more. Since the threads are not blocked while waiting for IO to complete they can open up more connections and serve other requests.\n1# Applies only for NIO 2server: 3 tomcat: 4 threads: 5 max: 10 6 max-connections: 1000 Protocol limits the max connections per machine to 65,536, which is max ports available in TCP.\nThroughput (requests served per second) of a single server depends on following\nNumber of tomcat threads Server hardware (CPU, Memory, SSD, Network Bandwidth) Type of task (IO intensive vs CPU intensive) If you have 200 threads (BIO) and all request response on average take 1 second (latency) to complete then your server can handle 200 requests per second. When there are IO intensive tasks which cause threads to wait and context switching takes place, throughput calculation becomes tricky and needs to be approximated.\nIdeal number of threads that can be picked depend on\n1 Number of CPU Cores 2Number of Threads \u0026lt;= ----------------------- 3 1 - Blocking Factor For computation intensive job Blocking Factor (BF) is 0.
For IO intensive job Blocking Factor (BF) is between 0 \u0026amp; 1 (0 \u0026lt; BF \u0026lt; 1) If BF is 0, for computation intensive job Number of threads == Number of CPU cores. If 4 core CPU then 4 threads. If BF is 0.9 then for 4 core CPU machine the threads allowed (10 * no of cores) are 40. If BF is 0.5 then for 4 core CPU machine the threads allowed (2 * no of cores) are 8. Note Benchmark the system on a varied load to arrive at the peak throughput the system can handle.\nKeep-Alive Problem Network admin calls you to tell that many TCP connections are being created to the same clients. What do you do?\nTCP connections take time to be established, keep-alive keeps the connection alive for some more time in case the client wants to send more data again in the near future.\n1server: 2 tomcat: 3 max-keep-alive-requests: 10 4 keep-alive-timeout: 10 max-keep-alive-requests - Max number of HTTP requests that can be pipelined before connection is closed. keep-alive-timeout - Keeps the TCP connection for sometime to avoid doing a handshake again if request from same client is sent. Rest Client Connection Timeout Problem You are invoking rest calls to an external service which has degraded and has become very slow thereby causing your service to slow down. What do you do?\nIf the server makes external calls ensure to set the read and connection timeout on the rest client. If you don't set this then your server which is a client will wait forever to get the response.\n1# If unable to connect the external server then give up after 5 seconds. 2setConnectTimeout(5_000); 3# If unable to read data from external api call then give up after 5 seconds.
4setReadTimeout(5_000); Invoke this rest api that takes 10 secs as the external api is slow to complete the job but timeout happens in 5 sec.\n1curl --location \u0026#39;http://localhost:8080/api/external-api-job/10\u0026#39; You will see below error when timeouts are set\n12024-06-21T16:01:06.880+05:30 ERROR 25437 --- [nio-8080-exec-5] o.a.c.c.C.[.[.[/].[dispatcherServlet] : Servlet.service() for servlet [dispatcherServlet] in context with path [] threw exception [Request processing failed: org.springframework.web.client.ResourceAccessException: I/O error on GET request for \u0026#34;http://jsonplaceholder.typicode.com/users/1\u0026#34;: Read timed out] with root cause 2java.net.SocketTimeoutException: Read timed out 3\tat java.base/sun.nio.ch.NioSocketImpl.timedRead(NioSocketImpl.java:278) ~[na:na] 4\tat java.base/sun.nio.ch.NioSocketImpl.implRead(NioSocketImpl.java:304) ~[na:na] 5\tat java.base/sun.nio.ch.NioSocketImpl.read(NioSocketImpl.java:346) ~[na:na] If you are using WebClient then use Mono.timeout() or Flux.timeout() methods\nNote Always assume that all external API calls never return and design accordingly.\nDatabase Connection Pool Problem You are noticing database connection timeout. What do you do?\nUse a connection pool if you are interacting with database as it will prevent the connection from getting open \u0026amp; closed which is a costly operation. The connection in the pool will be reused. Optimal size of connection pool is recommended, too big a pool is bad as there is a lot of context switching. The database also defines max connections allowed per application.\nSpring boot provides Hikari connection pool. 
If there are run away SQL connections then service can quickly run out of connection in the pool and slow down the entire system.\n1spring: 2 datasource: 3 hikari: 4 maximumPoolSize: 5 5 connectionTimeout: 1000 6 idleTimeout: 60 7 maxLifetime: 180 By setting the connectionTimeout we ensure that when the connection pool is full then we timeout after 1 second instead of waiting forever to get a new connection.\nFail-Fast is always preferred than slowing down the entire service.\nInvoke this rest api that creates 10 new threads that request for DB connection while the pool only has 5.\n1curl --location \u0026#39;http://localhost:8080/api/async-db-job/10\u0026#39; You will see the below error\n1Caused by: org.hibernate.exception.JDBCConnectionException: Unable to acquire JDBC Connection [HikariPool-1 - Connection is not available, request timed out after 1001ms (total=5, active=5, idle=0, waiting=0)] [n/a] 2\tat org.hibernate.exception.internal.SQLExceptionTypeDelegate.convert(SQLExceptionTypeDelegate.java:51) ~[hibernate-core-6.5.2.Final.jar:6.5.2.Final] 3\tat org.hibernate.exception.internal.StandardSQLExceptionConverter.convert(StandardSQLExceptionConverter.java:58) ~[hibernate-core-6.5.2.Final.jar:6.5.2.Final] 4\tat org.hibernate.engine.jdbc.spi.SqlExceptionHelper.convert(SqlExceptionHelper.java:108) ~[hibernate-core-6.5.2.Final.jar:6.5.2.Final] 5\tat org.hibernate.engine.jdbc.spi.SqlExceptionHelper.convert(SqlExceptionHelper.java:94) ~[hibernate-core-6.5.2.Final.jar:6.5.2.Final] 6\tat org.hibernate.resource.jdbc.internal.LogicalConnectionManagedImpl.acquireConnectionIfNeeded(LogicalConnectionManagedImpl.java:116) ~[hibernate-core-6.5.2.Final.jar:6.5.2.Final] 7\tat org.hibernate.resource.jdbc.internal.LogicalConnectionManagedImpl.getPhysicalConnection(LogicalConnectionManagedImpl.java:143) ~[hibernate-core-6.5.2.Final.jar:6.5.2.Final] 8\tat 
org.hibernate.resource.jdbc.internal.LogicalConnectionManagedImpl.getConnectionForTransactionManagement(LogicalConnectionManagedImpl.java:273) ~[hibernate-core-6.5.2.Final.jar:6.5.2.Final] 9\tat org.hibernate.resource.jdbc.internal.LogicalConnectionManagedImpl.begin(LogicalConnectionManagedImpl.java:281) ~[hibernate-core-6.5.2.Final.jar:6.5.2.Final] 10\tat org.hibernate.resource.transaction.backend.jdbc.internal.JdbcResourceLocalTransactionCoordinatorImpl$TransactionDriverControlImpl.begin(JdbcResourceLocalTransactionCoordinatorImpl.java:232) ~[hibernate-core-6.5.2.Final.jar:6.5.2.Final] 11\tat org.hibernate.engine.transaction.internal.TransactionImpl.begin(TransactionImpl.java:83) ~[hibernate-core-6.5.2.Final.jar:6.5.2.Final] 12\tat org.springframework.orm.jpa.vendor.HibernateJpaDialect.beginTransaction(HibernateJpaDialect.java:176) ~[spring-orm-6.1.8.jar:6.1.8] 13\tat org.springframework.orm.jpa.JpaTransactionManager.doBegin(JpaTransactionManager.java:420) ~[spring-orm-6.1.8.jar:6.1.8] 14\t... 12 common frames omitted 15Caused by: java.sql.SQLTransientConnectionException: HikariPool-1 - Connection is not available, request timed out after 1001ms (total=5, active=5, idle=0, waiting=0) 16\tat com.zaxxer.hikari.pool.HikariPool.createTimeoutException(HikariPool.java:686) ~[HikariCP-5.1.0.jar:na] 17\tat com.zaxxer.hikari.pool.HikariPool.getConnection(HikariPool.java:179) ~[HikariCP-5.1.0.jar:na] 18\tat com.zaxxer.hikari.pool.HikariPool.getConnection(HikariPool.java:144) ~[HikariCP-5.1.0.jar:na] The configuration spring.hikari.connectionTimeout applies for new async thread pool. However, the tomcat thread pool will always wait in blocking state to get a connection from the pool.\nInvoke this rest api that runs 10 long-running db query job but will not timeout and wait in blocking state.\n1ab -n 10 -c 10 http://localhost:8080/api/db-long-query-job/5 JPA also enables first level cache by default inside a transactions/session. 
After transaction is done the entity is garbage collected. For cache across sessions use second level cache.\nNote Always assume that you will run out of database connections due to a bad api and set connection timeout for both the connection pool and thread pool to prevent them from waiting forever to get connections.\nLong-Running Database Query Problem DBA call you up and informs you that there is a long-running query in your service. What do you do?\nLong-running queries often slow down the entire system.\nTo check if there are long-running queries.\n1select * from pg_stat_activity To test this we explicitly slow down a query with pg_sleep function.\nWe set timeout on the transaction @Transactional(timeout = 5) to ensure that long-running query doesn't impact the entire system, after 5 seconds if the query doesn't return result an exception is thrown.\nFail-Fast is always preferred than slowing down the entire service.\n12024-06-21T16:24:08.130+05:30 WARN 27713 --- [nio-8080-exec-2] o.h.engine.jdbc.spi.SqlExceptionHelper : SQL Error: 0, SQLState: 57014 22024-06-21T16:24:08.130+05:30 ERROR 27713 --- [nio-8080-exec-2] o.h.engine.jdbc.spi.SqlExceptionHelper : ERROR: canceling statement due to user request 32024-06-21T16:24:08.138+05:30 ERROR 27713 --- [nio-8080-exec-2] o.a.c.c.C.[.[.[/].[dispatcherServlet] : Servlet.service() for servlet [dispatcherServlet] in context with path [] threw exception [Request processing failed: org.springframework.dao.QueryTimeoutException: JDBC exception executing SQL [select count(*), pg_sleep(?) 
IS NULL from customer] [ERROR: canceling statement due to user request] [n/a]; SQL [n/a]] with root cause 4 5org.postgresql.util.PSQLException: ERROR: canceling statement due to user request 6\tat org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2725) ~[postgresql-42.7.3.jar:42.7.3] 7\tat org.postgresql.core.v3.QueryExecutorImpl.processResults(QueryExecutorImpl.java:2412) ~[postgresql-42.7.3.jar:42.7.3] Note Always assume that all DB calls never return or are long-running and design accordingly.\nYou can further look at optimizing the query with help of indexes to avoid full table scan or introducing caching.\nYou can enable show-sql to view all the db queries however this will print to console without logging framework hence not recommended\n1spring: 2 jpa: 3 show-sql: true To pretty print SQL\n1spring: 2 jpa: 3 properties: 4 hibernate: 5 show_sql: true 6 format_sql: true To print the SQL in logging framework use\n1logging: 2 level: 3 root: info 4 org.hibernate.SQL: DEBUG 5 org.hibernate.type.descriptor.sql.BasicBinder: TRACE 6 org.hibernate.orm.jdbc.bind: TRACE Indexing Problem You tested your code for data fetch via SQL on dev setup ensuring that indexes were created and used. But in production the indexes are not being used despite being present, this is slowing your service. What do you do?\nCreating an index doesn't guarantee that the SQL execution engine will use those indexes.
The optimizer might choose a full table scan over an index if it determines that it is optimal.\n1EXPLAIN (FORMAT JSON) select * from customer where city = \u0026#39;San Jose\u0026#39;; 2EXPLAIN (FORMAT JSON, ANALYSE) select * from customer where city = \u0026#39;San Jose\u0026#39;; Without Index\n1[ 2 { 3 \u0026#34;Plan\u0026#34;: { 4 \u0026#34;Node Type\u0026#34;: \u0026#34;Seq Scan\u0026#34;, 5 \u0026#34;Parallel Aware\u0026#34;: false, 6 \u0026#34;Async Capable\u0026#34;: false, 7 \u0026#34;Relation Name\u0026#34;: \u0026#34;customer\u0026#34;, 8 \u0026#34;Alias\u0026#34;: \u0026#34;customer\u0026#34;, 9 \u0026#34;Startup Cost\u0026#34;: 0.00, 10 \u0026#34;Total Cost\u0026#34;: 2.40, 11 \u0026#34;Plan Rows\u0026#34;: 1, 12 \u0026#34;Plan Width\u0026#34;: 39, 13 \u0026#34;Actual Startup Time\u0026#34;: 0.034, 14 \u0026#34;Actual Total Time\u0026#34;: 0.044, 15 \u0026#34;Actual Rows\u0026#34;: 1, 16 \u0026#34;Actual Loops\u0026#34;: 1, 17 \u0026#34;Filter\u0026#34;: \u0026#34;((city)::text = \u0026#39;San Jose\u0026#39;::text)\u0026#34;, 18 \u0026#34;Rows Removed by Filter\u0026#34;: 111 19 }, 20 \u0026#34;Planning Time\u0026#34;: 0.149, 21 \u0026#34;Triggers\u0026#34;: [ 22 ], 23 \u0026#34;Execution Time\u0026#34;: 0.078 24 } 25] With Index\n1[ 2 { 3 \u0026#34;Plan\u0026#34;: { 4 \u0026#34;Node Type\u0026#34;: \u0026#34;Index Scan\u0026#34;, 5 \u0026#34;Parallel Aware\u0026#34;: false, 6 \u0026#34;Async Capable\u0026#34;: false, 7 \u0026#34;Scan Direction\u0026#34;: \u0026#34;Forward\u0026#34;, 8 \u0026#34;Index Name\u0026#34;: \u0026#34;idx_customer_city\u0026#34;, 9 \u0026#34;Relation Name\u0026#34;: \u0026#34;customer\u0026#34;, 10 \u0026#34;Alias\u0026#34;: \u0026#34;customer\u0026#34;, 11 \u0026#34;Startup Cost\u0026#34;: 0.14, 12 \u0026#34;Total Cost\u0026#34;: 8.16, 13 \u0026#34;Plan Rows\u0026#34;: 1, 14 \u0026#34;Plan Width\u0026#34;: 1556, 15 \u0026#34;Actual Startup Time\u0026#34;: 0.031, 16 \u0026#34;Actual Total Time\u0026#34;: 0.033, 17 
\u0026#34;Actual Rows\u0026#34;: 1, 18 \u0026#34;Actual Loops\u0026#34;: 1, 19 \u0026#34;Index Cond\u0026#34;: \u0026#34;((city)::text = \u0026#39;San Jose\u0026#39;::text)\u0026#34;, 20 \u0026#34;Rows Removed by Index Recheck\u0026#34;: 0 21 }, 22 \u0026#34;Planning Time\u0026#34;: 0.380, 23 \u0026#34;Triggers\u0026#34;: [ 24 ], 25 \u0026#34;Execution Time\u0026#34;: 0.090 26 } 27] Index Scan: This indicates that the query is using the index. The output will mention the specific index name. Seq Scan: This indicates a sequential scan, meaning the index is not being used. EXPLAIN ANALYZE: To see actual execution statistics rather than just an estimation, you can use EXPLAIN ANALYZE, which will run the query and provide runtime details.\nIf the table is small, and a full table scan is faster than using the index. A significant portion of the table matches the condition, making an index scan less efficient. If there are many rows with the same city value, the optimizer might prefer a sequential scan. Index occupies space and impacts insert and delete row performance. If there are 2 indexes then its upto the optimizer to pick the one it finds a best fit. Behaviour might change at runtime. Order in which the where clause is written will impact which index is used. The index column should be the first in the where clause and any other filtering logic should come after index columns. Always ensure that the where clause contains the same columns that are indexed. You can also use hints to ensure that optimizer chooses to use the indexes. 
To provide hints enable the extension\n1CREATE EXTENSION pg_hint_plan; Insert 100k records\n1INSERT INTO public.customer (city, name, phone) 2SELECT 3 \u0026#39;city_\u0026#39; || gs, 4 \u0026#39;name_\u0026#39; || gen_random_uuid(), 5 \u0026#39;phone_\u0026#39; || gs 6FROM generate_series(1, 5000000) AS gs; You can only provide a hint, there are no guarantee that optimizer will use those hints.\n1EXPLAIN (FORMAT JSON) /*+ IndexScan(customer idx_customer_city) */ 2SELECT * FROM public.customer WHERE city = \u0026#39;San Jose\u0026#39;; 3 4EXPLAIN (FORMAT JSON) /*+ IndexScan(customer idx_customer_city NO) */ 5SELECT * FROM public.customer WHERE city = \u0026#39;San Jose\u0026#39;; Check index\n1SELECT * FROM pg_indexes WHERE indexname = \u0026#39;idx_customer_city\u0026#39;; 2SELECT indexname, indexdef FROM pg_indexes WHERE tablename = \u0026#39;customer\u0026#39;; Database Schema Changes Problem You add a SQL change to modify an existing table schema or add a new index. The table already exists in production with 10 million rows. You test your change in QE environment which works fine but when your change hits production the database table gets locked for 30 minutes there by causing an outage. What do you do?\nChanges to the schema of existing tables locks the table.\nTill the time the liquibase change is not applied the server will not start, this could mean that your server will take a long time to come online. Any other existing services that are still up and are reading from that table will also be blocked on either read/write. 
This could mean a big down-time depending on the data size.\nInsert some test data\n1INSERT INTO customer (name, phone, city) 2SELECT 3 \u0026#39;Test-Name\u0026#39;, 4 \u0026#39;999-999-9999\u0026#39;, 5 \u0026#39;Test-City\u0026#39; 6FROM generate_series(1, 10000000); 7select count(*) from customer; Adding column with default value\nSince postgres 11 alter column with default value doesn't lock the table for read and write anymore as there is no table re-write. In older versions that table is entirely rewritten, so it's an expensive operation.\n1--since postgres11 this doesnt matter. 2ALTER TABLE customer ADD COLUMN last_update TIMESTAMP DEFAULT now(); vs\n1ALTER TABLE customer ADD COLUMN last_update TIMESTAMP; 2 3--This will take a long time, ensure that this happens in a different transaction and not part of alter table transaction. 4UPDATE customer SET last_update = now(); clean up\n1ALTER TABLE customer DROP COLUMN last_update; Lock queues \u0026amp; Lock timeouts\nPostgres uses lock queues. Transactions that modify a same row/table are queued, they remain blocked till they are executed in the order they were queued.\nUse lock timeout to set max limit to wait for operation. By setting lock_timeout, the DDL command will fail if it ends up waiting for a lock more than 5 seconds The downside is that your ALTER TABLE might not succeed, but you can try again later. Check pg_stat_activity to see if there are long-running queries before starting the DDL command.\n1SET lock_timeout TO \u0026#39;5s\u0026#39;; 2ALTER TABLE customer ADD COLUMN last_update TIMESTAMP; To look at the locks\n1select * from pg_locks; clean up\n1ALTER TABLE customer DROP COLUMN last_update; Creating/dropping indexes concurrently\nCreating an index on a large table can take long time. This can affect the startup times of your service. The create index command blocks all writes for the duration of the command. It doesn't block select it blocks only insert \u0026amp; delete. 
The create index concurrently is a better approach. Creating an index concurrently does have a downside. If something goes wrong it does not roll back and leaves an unfinished (\u0026quot;invalid\u0026quot;) index behind. If that happens, run drop index concurrently name_index and try to create it again.\n1CREATE INDEX name_index ON customer (name); vs\n1CREATE INDEX CONCURRENTLY name_index ON customer (name); clean up\n1DROP INDEX CONCURRENTLY name_index; Altering an indexed column \u0026amp; adding not null column\nAltering a column that already has index is a costly operation. If not null columns are added it's a 2 step operation where you add the column and then add a default value.\nTruncate vs Delete\nPrefer truncate over delete to clean a table. Truncate doesn't write to transactional log hence is faster but there is no option of rollback. Both block read \u0026amp; modify operations. Truncate quickly remove all rows from a table and do not need to worry about triggers, foreign key constraints, or retaining identity column values. Delete removes specific rows, rely on triggers, enforce foreign key constraints, or need the operation to be fully logged.\n1delete from customer; vs\n1truncate table customer; Modifying Large Data Set\nAnother approach of making changes to big tables and have them lock the table is by copying the data to a new table and then renaming it after the operation is completed.\nThe below SQL will block all reads on the table till the transaction is completed.\n1BEGIN; 2ALTER TABLE customer ADD COLUMN age INTEGER; 3 4--This will take a long time, instead of DEFAULT we can refer to some other table and populate age here. 5UPDATE customer SET age = (select 18); 6 7ALTER TABLE customer ALTER COLUMN age SET NOT NULL; 8COMMIT; The below SQL will create a copy of the table and modify the data and then rename it. 
This means that reads are not blocked unlike the above SQL.\n1BEGIN; 2CREATE TABLE customer_copy AS SELECT * FROM customer; 3ALTER TABLE customer_copy ADD COLUMN age INTEGER; 4--This will take a long time, instead of DEFAULT we can refer to some other table and populate age here. 5UPDATE customer_copy SET age = (select 18); 6ALTER TABLE customer_copy ALTER COLUMN age SET NOT NULL; 7DROP TABLE customer; 8ALTER TABLE customer_copy RENAME TO customer; 9COMMIT; clean up\n1ALTER TABLE customer DROP COLUMN age; Adding a primary key\nIf you are adding/modifying primary key then index creation takes a long time. You need to introduce a unique constraint concurrently CREATE UNIQUE INDEX CONCURRENTLY and then use the unique index as a primary key, which is a fast operation.\n1--drop primary key for testing 2ALTER TABLE customer DROP CONSTRAINT customer_pkey; 1-- blocks queries for a long time 2ALTER TABLE customer ADD PRIMARY KEY (id); 1-- takes a long time, but doesn\u0026#39;t block queries 2CREATE UNIQUE INDEX CONCURRENTLY customer_unq ON customer (id); 3-- blocks queries, but only very briefly 4ALTER TABLE customer ADD CONSTRAINT customer_pkey PRIMARY KEY USING INDEX customer_unq; Locking in Database\nTable level locks Row level locks Transactions run concurrently until they try to acquire a conflicting lock like updating the same row. The first transaction to acquire the lock can proceed, and the second one waits until the first transaction commits or aborts. Locks are always kept until commit or rollback.\nThere are 2 types of locks\nShared lock (FOR SHARE) Exclusive lock (FOR UPDATE) Below query acquires a row lock that prevents any modification to the selected row.\n1--other transactions can still read the same row but can\u0026#39;t modify it.
2SELECT * from customer where id = 1 FOR SHARE; 1--other transactions cant even read/modify the same row 2SELECT * from customer where id = 1 FOR UPDATE; Never VACUUM FULL\nThe AUTOVACUUM is a background process that automatically performs vacuuming which helps manage and optimize the storage of data within the database.\nReclaims Storage Prevents Transaction ID Wraparound Updates Statistics Maintains Indexes To optimize PostgreSQL performance, you need to adjust autovacuum settings and effectively use indexes Running VACUUM (but not VACUUM FULL) periodically can help maintain database health.\nDatabase rollback The database schema must be compatible with the previous version to ensure that application rollback doesn't require database rollback. Database rollback should be avoided as much as possible.\nMemory Leak \u0026amp; CPU Spike Problem You tested your service on your laptop and local kubernetes instance. In production the admin informs you that your pods are restarting frequently. What do you do?\nMemory leaks are always hard to debug, a badly written method can cause spike in heap memory usage causing a lot of GC (garbage collection) which are stop-the-world events.\nWith kubernetes you can define resource limits that kill the pod if it tries to use more resources than allocated. Limits define the limits for the container, requests define limit for single container as there can be multiple containers in single pod.\n1resources: 2 requests: 3 cpu: \u0026#34;250m\u0026#34; 4 memory: \u0026#34;250Mi\u0026#34; 5 limits: 6 cpu: \u0026#34;2\u0026#34; 7 memory: \u0026#34;500Mi\u0026#34; Invoke this rest api that creates a memory leak in the jvm.\n1curl --location \u0026#39;http://localhost:8080/api/memory-leak-job/999\u0026#39; This causes a memory spike, the pod will be killed (OOMKilled) and a new pod brought up.\nNote For an OutOfMemoryError the pod doesn't necessarily kill the pod unless some health check is configured.
Pod will still remain in running state despite the OOM error. Only the resource limits defined determine when the pod gets killed.\n1Exception in thread \u0026#34;http-nio-8080-exec-1\u0026#34; java.lang.OutOfMemoryError: Java heap space Response Payload Size Problem Your rest api returns list of customer records, However as more customers are added in production the size of response becomes bigger \u0026amp; bigger and slows down the request-response times.\n1curl --location \u0026#39;http://localhost:8080/api/customer\u0026#39; Always add pagination support and avoid returning all the data in a single response. Data may grow later causing response size to get bigger over a period of time.\n1curl --location \u0026#39;http://localhost:8080/api/customer-page\u0026#39; Enable gzip compression which also reduce the size of response payload.\n1server: 2 compression: 3 enabled: true 4 # Minimum response when compression will kick in 5 min-response-size: 512 6 # Mime types that should be compressed 7 mime-types: text/xml, text/plain, application/json You can also consider using GraphQL so that client can request for only the data it needs\nYou can also change the protocol to http2 to get more benefits like multiplexing many requests over single tcp connection.\n1server: 2 http2: 3 enabled: true HTTP caching - You can also avoid sending response if the payload hasn't changed since last modified time. If the response contains Last-Modified or ETag the client can re-use the previous payload as nothing has changed.\nLast-Modified Client will send the last modified If-Modified-Since header field and if payload hasnt changed server will return 304 Not Modified\nEtag\nShallow Hashing - Client sends the previous ETag and server generates the whole payload and then create a ETag and matches if it is same. If yes then return 304 Not Modified. Deep Hashing - Client sends previous Etag and server compares it against the latest ETag it holds in cache. 
If same then returns 304 Not Modified Note Always try to reduce the size of the response payload, send only the data required instead of the whole payload. Use pagination for data records and gzip payload to reduce the size.\nIf there is an api being called every second then it makes sends to either use Web Sockets or Server Send Events (SSE) which can stream data and avoid the costly request-response.\nAPI versioning \u0026amp; Feature Flag Problem A new team member has updated an existing API \u0026amp; introduced a new feature that was used by many downstream applications, however a bug got introduced and now all the downstream api are failing.\nAlways look at versioning your api instead of updating existing api that are used by downstream services. This contains the blast radius of any bug.\neg: /api/v1/customers being the old api and /api/v2/customers being the new api\nUse feature flag that can be toggled on/off if any issues arise.\n1management: 2 endpoint: 3 refresh: 4 enabled: true Note Backward compatibility is very important, specially when services rollback to older versions in distributed systems. Always work with versioned API or feature flag if there are major changes or new features being introduced.\nBulk Head Pattern Problem Thread pools are shared, a runway function is occupying the thread pool 100% and not letting other tasks execute. What do you do?\nBulkhead defines maximum number of concurrent calls allowed to be executed in a given timeframe. This prevents failures in a system/API from affecting other systems/APIs\nThe @Bulkhead is the annotation used to enable bulkhead on an API call. This can be applied at the method level or a class level. 
If applied at the class level, it applies to all public methods.\n1resilience4j: 2 bulkhead: 3 instances: 4 project57-b1: 5 max-concurrent-calls: 2 6 max-wait-duration: 10ms max-concurrent-calls - Number of concurrent calls allowed max-wait-duration - Wait for 10ms before failing in case of the limit breach 1ab -n 10 -c 10 http://localhost:8080/api/bulk-head-job 1Complete requests: 10 2Failed requests: 7 3 (Connect: 0, Receive: 0, Length: 7, Exceptions: 0) 4Non-2xx responses: 7 Rate Limit vs Bulk Head\nrate-limit - Allow this api to run only 10 requests per min. bulk-head - Allow this api to use only 10 threads from the pool per min to run. Rest of threads will be available for other API. Rate Limiter Problem A particular api of your service is overused due to a wrong retry logic in a client which just keeps spamming your server on that single api.\nLook at implementing rate limiting. Rate limiting can be implemented at gateway level or at application level. It helps prevent Denial of Service attacks.\nFor rate limiting implementation at gateway level refer\nhttp://gitorko.github.io/post/spring-traefik-rate-limit\nThe @RateLimiter is the annotation used to rate-limit an API call and applied at the method or class levels. 
If applied at the class level, it applies to all public methods\n1resilience4j: 2 ratelimiter: 3 instances: 4 project57-r1: 5 limit-for-period: 5 6 limit-refresh-period: 1s 7 timeout-duration: 0s timeout-duration - default wait time a thread waits for a permission limit-refresh-period - time window to count the requests limit-for-period - number of requests or method invocations are allowed in the above limit-refresh-period 1ab -n 10 -c 10 http://localhost:8080/api/rate-limit-job 1Complete requests: 10 2Failed requests: 5 Note Always assume that your api will be invoked by clients more than they are intended to be invoked due to wrong retry configuration.\nRetry Problem One of the downstream service had a minor glitch (restart) and your rest call failed the first time it got a bad response. What do you do?\nRest calls often fail in distributed environment. You need to retry @Retry the api with exponential backoff and max attempts to avoid overwhelming the server.\nEnsure that the external rest api being called in retry is idempotent.\n1resilience4j: 2 retry: 3 instances: 4 project57-y1: 5 max-attempts: 3 6 waitDuration: 10s 7 enableExponentialBackoff: true 8 exponentialBackoffMultiplier: 2 9 retryExceptions: 10 - org.springframework.web.client.HttpClientErrorException 11 ignoreExceptions: 12 - org.springframework.web.client.HttpServerErrorException Invoke this rest api that fails the first 2 times and succeeds on the 3rd attempt.\n1curl --location \u0026#39;http://localhost:8080/api/retry-job\u0026#39; Circuit Breaker Pattern The circuit breaker pattern protects a downstream service by restricting the upstream service from calling the downstream service during a partial or complete downtime.\nThe @CircuitBreaker will close the circuit so that downstream client dont keep calling the same api again \u0026amp; again when it is having issues.\n1resilience4j: 2 circuitbreaker: 3 instances: 4 project57-c1: 5 failure-rate-threshold: 50 6 minimum-number-of-calls: 5 7 
automatic-transition-from-open-to-half-open-enabled: true 8 wait-duration-in-open-state: 5s 9 permitted-number-of-calls-in-half-open-state: 3 10 sliding-window-size: 10 11 sliding-window-type: count_based Invoke the below api to open and close the circuit. If more failures are seen circuit is opened which mean no traffic can flow.\nA CircuitBreaker can be in three states:\nCLOSED – API working fine OPEN – API experiencing issues, all requests to it are short-circuited HALF_OPEN – API experiencing issues and some traffic will be allowed periodically to check if server recovered In half open mode only few requests are allowed to check if service recovered. In closed state it will send 503 Service Unavailable error.\n1curl --location \u0026#39;http://localhost:8080/api/circuit-breaker-job/true\u0026#39; 1curl --location \u0026#39;http://localhost:8080/api/circuit-breaker-job/false\u0026#39; Health Check Observability \u0026amp; Monitoring Problem Your customer reaches out each time there is an issue. Is there an active way to monitor your system instead of waiting for customer to report the issue? What do you do?\nMonitoring - ensures the system is healthy. You can monitor CPU usage, memory usage, request rates, and error rates. Observability - helps you understand issues and derive insights. You can use active monitoring setup which will proactively look for issues that happen in your system so that you can address them.\nObservability is the ability to observe the internal state of a running system from the outside. Observability has 3 pillars\nLogging - Logging Correlation IDs - Correlation IDs provide a helpful way to link lines in your log files to spans/traces. Metrics - Custom metrics to monitor time taken, count invocations etc. Distributed Tracing - Micrometer Tracing library is a facade for popular tracer libraries. 
eg: OpenTelemetry, OpenZipkin Brave https://gitorko.github.io/post/spring-observability/\nException Handling Problem You errors are returning 500 Internal Server error, downstream services are not able to determine reason for the error.\nUse @RestControllerAdvice to return custom error responses. If you have generic exception then use @Order to determine which exception gets returned first in a nested exception.\nTo get more details in the error response enable these\n1server: 2 error: 3 include-binding-errors: always 4 include-exception: false 5 include-message: always 6 include-path: always 7 include-stacktrace: never Be aware that if you are using dev tools org.springframework.boot:spring-boot-devtools the error response will be detailed by default and will not behave same in production unless the above properties are configured.\nLogging Problem Kubernetes pods are ephemeral, you dont have access to history logs that are written to console.\nEnable file logging Enable rolling of log file Enable trace-id in log file Enable GC logging Enable async logging (does come with risk of loosing few log messages) Logs must contain pod name to determine which instance the error occurred on Log file name must contain pod name File logging\n1logging: 2 file: 3 name: project57-app-${HOSTNAME}.log 4 logback: 5 rollingpolicy: 6 file-name-pattern: logs/%d{yyyy-MM, aux}/project57-app-${HOSTNAME}.%d{yyyy-MM-dd}.%i.log 7 max-file-size: 100MB 8 total-size-cap: 10GB 9 max-history: 10 10 level: 11 root: info GC logging\n1\u0026#39;-Xlog:gc*=info:file=logs/project57-gc.log:time,uptime,level,tags:filecount=5,filesize=100m\u0026#39;, On kubernetes write the log to a persistent volume else you will loose the logs on pod restart.\nYou can use FluentD or Promtail log brokers that collect and send logs to an Elasticsearch/Loki storage.\nJVM tuning Problem Users are reporting that once in a while the API response is really long and it returns back to normal response time in a short while. 
What do you do?\nGarbage collection can impact response times as GC is stop of the world event. When major GC happens it pauses all threads which might impact response time for time sensitive api.\nTune your JVM and enable logging and monitoring (actuator + prometheus) on the GC\n-Xms, -Xmx - Places boundaries on the heap size to increase the predictability of garbage collection. The heap size is limited in replica servers so that even Full GCs do not trigger SIP retransmissions. -Xms sets the starting size to prevent pauses caused by heap expansion. -XX:+UseG1GC - Use the Garbage First (G1) Collector. -XX:MaxGCPauseMillis - Sets a target for the maximum GC pause time. This is a soft goal, and the JVM will make its best effort to achieve it. -XX:ParallelGCThreads - Sets the number of threads used during parallel phases of the garbage collectors. The default value varies with the platform on which the JVM is running. -XX:ConcGCThreads - Number of threads concurrent garbage collectors will use. The default value varies with the platform on which the JVM is running. -XX:InitiatingHeapOccupancyPercent - Percentage of the (entire) heap occupancy to start a concurrent GC cycle. GCs that trigger a concurrent GC cycle based on the occupancy of the entire heap and not just one of the generations, including G1, use this option. A value of 0 denotes 'do constant GC cycles'. The default value is 45. -XX:HeapDumpOnOutOfMemoryError - Will dump the heap to file in case of out of memory error. 
1\u0026#39;-server\u0026#39; 2\u0026#39;-Xms250m\u0026#39;, 3\u0026#39;-Xmx500m\u0026#39;, 4\u0026#39;-XX:+HeapDumpOnOutOfMemoryError\u0026#39; 5\u0026#39;-XX:+UseG1GC\u0026#39;, 6\u0026#39;-XX:MaxGCPauseMillis=200\u0026#39;, 7\u0026#39;-XX:ParallelGCThreads=20\u0026#39;, 8\u0026#39;-XX:ConcGCThreads=5\u0026#39;, 9\u0026#39;-XX:InitiatingHeapOccupancyPercent=70\u0026#39;, 10\u0026#39;-Xlog:gc*=info:file=project57-gc.log:time,uptime,level,tags:filecount=5,filesize=100m Server Startup Time Problem Your notice your server startup time is slow, it takes 10 sec for the server to startup. What do you do?\nYou can enable lazy initialization, Spring won’t create all beans on startup it will inject no dependencies until that bean is needed\nYou can check if autoconfigured beans are being set and disable them if not required.\n1logging: 2 level: 3 org.springframework.boot.autoconfigure: DEBUG Disable JMX beans to save on time\n1spring: 2 jmx: 3 enabled: false 1spring: 2 main: 3 lazy-initialization: true GraalVM uses Ahead of Time (AOT) Compilation creates a native binary image that doesn't require Java to run. It will increase startup time and reduce memory footprint. It optimizes by doing static analysis, removal of unused code, creating fixed classpath, etc.\nSince Java 11, there is no pre-bundled JRE provided. As a result, basic Dockerfiles without any optimization can result in large image sizes. To reduce size of docker image\nUse Minimal Base Images Use Docker Multistage Builds Minimize the Number of Layers Use jlink to build custom JRE Create .dockerignore to leave out readme files. Use jdeps to strip dependencies not used. Security Problem You have ensured that you don't print any customer information in logs, however the heapdump file that was shared in a ticket now exposes passwords to any user without access. What do you do?\nSome of the basic security checks\nNo credit card numbers in logs. No passwords in logs. No User personal information in logs. 
No PII (Personal Identifiable Information) in logs Permissions to production is restricted to few people by Authentication \u0026amp; Authorization. Salt has been added to password before storing it. Url don't have password or secure information in parameter as url get logged. Custom exceptions are thrown to customer and dont expose the backend exception to the end user. Cross site scripting is blocked. SQL injection attacks are blocked. Vulnerability scan are done and libraries updated to use latest fix. Input is always validated API keys / token is used to allow authenticated \u0026amp; authorized use of api Password are stored in encrypted format not in plain text, use Vault Allow listings (white listing) defines IP from which request can originate HTTPS upto gateway and HTTP can be used internally within network Audit logging trail is present to identify who changed what at what time. Use event sourcing where update events are queued and written to a secondary db/table. Data retention is planned to delete data which is no longer required. However heap dump file is one area that can leak passwords if the file is shared.\nTrigger a password generation request and at the same time take a heap dump. 
You will see the password in plain text.\n1curl --location \u0026#39;http://localhost:8080/api/job15/60\u0026#39; Note Heap dump files also need to protected with password similar to production data access.\nOther Failures Distributed system can fail at various points, other areas of failure that can happen and need to be factored in design are\nPrimary DB failure or data corruption - Active-Active setup vs Active-Passive setup Secondary DB replication failure Queue failures - message loss during restart Network failures External Systems can go down Service nodes can go down so your service must be resilient to this Cache invalidation/eviction (TTL) failure Load Balancer failures Datacenter failure for one region Chaos Monkey testing CDN failure Audit Logging failure Network failure Code 1package com.demo.project57; 2 3import lombok.extern.slf4j.Slf4j; 4import org.springframework.boot.CommandLineRunner; 5import org.springframework.boot.SpringApplication; 6import org.springframework.boot.autoconfigure.SpringBootApplication; 7import org.springframework.context.annotation.Bean; 8 9@SpringBootApplication 10@Slf4j 11public class Main { 12 public static void main(String[] args) { 13 SpringApplication.run(Main.class, args); 14 } 15 16 @Bean 17 public CommandLineRunner onStart() { 18 return args -\u0026gt; { 19 log.info(\u0026#34;On Start!\u0026#34;); 20 }; 21 } 22} 1package com.demo.project57.controller; 2 3import java.net.InetAddress; 4import java.time.Instant; 5import java.time.LocalDateTime; 6import java.util.ArrayList; 7import java.util.Arrays; 8import java.util.HashMap; 9import java.util.List; 10import java.util.Map; 11import java.util.concurrent.CompletableFuture; 12 13import com.demo.project57.config.CloudConfig; 14import com.demo.project57.domain.Customer; 15import com.demo.project57.service.CustomerService; 16import io.github.resilience4j.bulkhead.annotation.Bulkhead; 17import io.github.resilience4j.circuitbreaker.annotation.CircuitBreaker; 18import 
io.github.resilience4j.ratelimiter.annotation.RateLimiter; 19import io.github.resilience4j.timelimiter.annotation.TimeLimiter; 20import io.swagger.v3.oas.annotations.Operation; 21import io.swagger.v3.oas.annotations.media.Content; 22import io.swagger.v3.oas.annotations.media.Schema; 23import io.swagger.v3.oas.annotations.responses.ApiResponse; 24import io.swagger.v3.oas.annotations.responses.ApiResponses; 25import jakarta.validation.Valid; 26import lombok.AllArgsConstructor; 27import lombok.Data; 28import lombok.RequiredArgsConstructor; 29import lombok.SneakyThrows; 30import lombok.extern.slf4j.Slf4j; 31import org.passay.CharacterRule; 32import org.passay.EnglishCharacterData; 33import org.passay.PasswordGenerator; 34import org.springframework.cache.Cache; 35import org.springframework.cache.CacheManager; 36import org.springframework.data.domain.Pageable; 37import org.springframework.http.ResponseEntity; 38import org.springframework.security.crypto.factory.PasswordEncoderFactories; 39import org.springframework.web.bind.annotation.GetMapping; 40import org.springframework.web.bind.annotation.PathVariable; 41import org.springframework.web.bind.annotation.PostMapping; 42import org.springframework.web.bind.annotation.PutMapping; 43import org.springframework.web.bind.annotation.RequestBody; 44import org.springframework.web.bind.annotation.RequestMapping; 45import org.springframework.web.bind.annotation.RestController; 46import org.springframework.web.client.RestClient; 47 48@RestController 49@RequiredArgsConstructor 50@Slf4j 51@RequestMapping(\u0026#34;/api\u0026#34;) 52public class HomeController { 53 54 private final CustomerService customerService; 55 private final RestClient restClient; 56 private final CloudConfig cloudConfig; 57 private final CacheManager cacheManager; 58 59 Map\u0026lt;MyKey, byte[]\u0026gt; customerMap = new HashMap\u0026lt;\u0026gt;(); 60 List\u0026lt;Customer\u0026gt; customerList; 61 Cache cache; 62 63 @SneakyThrows 64 
@GetMapping(\u0026#34;/time\u0026#34;) 65 public String getTime() { 66 log.info(\u0026#34;Getting server time!\u0026#34;); 67 String podName = InetAddress.getLocalHost().getHostName(); 68 return \u0026#34;Pod: \u0026#34; + podName + \u0026#34; : \u0026#34; + LocalDateTime.now(); 69 } 70 71 /** 72 * Will block the tomcat threads and hence no other requests can be processed 73 */ 74 @GetMapping(\u0026#34;/blocking-job/{delay}\u0026#34;) 75 public String blockingJob(@PathVariable Long delay) { 76 log.info(\u0026#34;blockingJob request received, delay: {}\u0026#34;, delay); 77 return customerService.longRunningJob(delay); 78 } 79 80 /** 81 * Will not block the tomcat threads and hence no other requests can be processed 82 */ 83 @GetMapping(\u0026#34;/async-job/{delay}\u0026#34;) 84 public CompletableFuture\u0026lt;String\u0026gt; asyncJob(@PathVariable Long delay) { 85 log.info(\u0026#34;asyncJob request received, delay: {}\u0026#34;, delay); 86 return CompletableFuture.supplyAsync(() -\u0026gt; { 87 return customerService.longRunningJob(delay); 88 }); 89 } 90 91 /** 92 * The @TimeLimiter will timeout if the job takes too long. 93 * The job will still run in the background, There is no way to kill a thread in java you can only interrupt. 
94 */ 95 @GetMapping(\u0026#34;/timeout-job/{delay}\u0026#34;) 96 @TimeLimiter(name = \u0026#34;project57-t1\u0026#34;) 97 public CompletableFuture\u0026lt;String\u0026gt; timeoutJob(@PathVariable Long delay) { 98 log.info(\u0026#34;timeoutJob request received, delay: {}\u0026#34;, delay); 99 return CompletableFuture.supplyAsync(() -\u0026gt; { 100 return customerService.longRunningJob(delay); 101 }); 102 } 103 104 /** 105 * API calling an external API that is not responding 106 * Here timeout on the rest client is configured 107 */ 108 @GetMapping(\u0026#34;/external-api-job/{delay}\u0026#34;) 109 public String externalApiJob(@PathVariable Long delay) { 110 log.info(\u0026#34;externalApiJob request received, delay: {}\u0026#34;, delay); 111 String result = restClient.get() 112 .uri(\u0026#34;/users/1?_delay=\u0026#34; + (delay * 1000)) 113 .retrieve() 114 .body(String.class); 115 log.info(\u0026#34;externalApiJob response: {}\u0026#34;, result); 116 return result; 117 } 118 119 /** 120 * Over user of db connection by run-away thread pool 121 */ 122 @GetMapping(\u0026#34;/async-db-job/{threads}\u0026#34;) 123 public void asyncDbJob(@PathVariable int threads) { 124 log.info(\u0026#34;asyncDbJob request received, threads: {}\u0026#34;, threads); 125 customerService.invokeAsyncDbCall(threads, 1); 126 } 127 128 /** 129 * Long-running query without timeout 130 * Explicit delay of 10 seconds introduced in DB query 131 */ 132 @GetMapping(\u0026#34;/db-long-query-job/{delay}\u0026#34;) 133 public int dbLongQueryJob(@PathVariable Long delay) { 134 log.info(\u0026#34;dbLongQueryJob request received, delay: {}\u0026#34;, delay); 135 return customerService.getCustomerCount1(delay); 136 } 137 138 /** 139 * Long-running query with timeout of 5 seconds 140 */ 141 @GetMapping(\u0026#34;/db-long-query-timeout-job/{delay}\u0026#34;) 142 public int dbLongQueryTimeoutJob(@PathVariable Long delay) { 143 log.info(\u0026#34;dbLongQueryTimeoutJob request received, delay: {}\u0026#34;, 
delay); 144 return customerService.getCustomerCount2(delay); 145 } 146 147 /** 148 * Create memory leak and spike in heap memory 149 * Map keeps growing on each call and eventually causes OOM error 150 * If the key is unique the map should have fixed set of entries no matter how many times we invoke 151 * Key in hashmap has to be immutable 152 */ 153 @GetMapping(\u0026#34;/memory-leak-job/{records}\u0026#34;) 154 public ResponseEntity memoryLeakJob(@PathVariable Long records) { 155 log.info(\u0026#34;memoryLeakJob request received\u0026#34;); 156 for (int i = 0; i \u0026lt; records; i++) { 157 //By creating a non-immutable key it creates a memory leak 158 customerMap.put(new MyKey(\u0026#34;customer_\u0026#34; + i), new byte[100000]); 159 } 160 return ResponseEntity.ok().build(); 161 } 162 163 /** 164 * Will allow GC to recover the space 165 */ 166 @GetMapping(\u0026#34;/load-heap-job/{records}\u0026#34;) 167 public ResponseEntity loadHeapJob(@PathVariable Long records) { 168 log.info(\u0026#34;loadHeapJob request received\u0026#34;); 169 customerList = new ArrayList\u0026lt;\u0026gt;(); 170 for (int i = 0; i \u0026lt; records; i++) { 171 //By creating a non-immutable key it creates a memory leak 172 customerList.add(Customer.builder() 173 .id(Long.valueOf(i)) 174 .name(\u0026#34;customer_\u0026#34; + i) 175 .city(\u0026#34;city_\u0026#34; + i) 176 .build()); 177 } 178 return ResponseEntity.ok().build(); 179 } 180 181 /** 182 * Bulk head 183 */ 184 @GetMapping(\u0026#34;/bulk-head-job\u0026#34;) 185 @Bulkhead(name = \u0026#34;project57-b1\u0026#34;) 186 public String bulkHeadJob() { 187 log.info(\u0026#34;bulkHeadJob request received\u0026#34;); 188 return customerService.longRunningJob(5l); 189 } 190 191 /** 192 * Rate limit 193 */ 194 @GetMapping(\u0026#34;/rate-limit-job\u0026#34;) 195 @RateLimiter(name = \u0026#34;project57-r1\u0026#34;) 196 public String rateLimitJob(@PathVariable Long delay) { 197 log.info(\u0026#34;rateLimitJob request received\u0026#34;); 
198 return customerService.longRunningJob(5l); 199 } 200 201 @GetMapping(\u0026#34;/retry-job\u0026#34;) 202 public String retryJob() { 203 log.info(\u0026#34;retryJob request received\u0026#34;); 204 return customerService.getTime(); 205 } 206 207 /** 208 * If this api keeps failing, after 50% failure rate the circuit will be closed 209 * It will then return 503 Service Unavailable 210 */ 211 @GetMapping(\u0026#34;/circuit-breaker-job/{fail}\u0026#34;) 212 @CircuitBreaker(name = \u0026#34;project57-c1\u0026#34;) 213 public String circuitBreakerJob(@PathVariable Boolean fail) { 214 log.info(\u0026#34;circuitBreakerJob request received\u0026#34;); 215 if (fail) { 216 throw new RuntimeException(\u0026#34;Failed Job!\u0026#34;); 217 } else { 218 return Instant.now().toString(); 219 } 220 } 221 222 /** 223 * Secret Password generated using library Passay 224 * Use salt and encode password before storing them. 225 */ 226 @GetMapping(\u0026#34;/password-gen-job/{delay}\u0026#34;) 227 public String passwordGenJob(@PathVariable Long delay) { 228 log.info(\u0026#34;passwordGenJob request received\u0026#34;); 229 List\u0026lt;CharacterRule\u0026gt; charList = Arrays.asList( 230 new CharacterRule(EnglishCharacterData.UpperCase, 2), 231 new CharacterRule(EnglishCharacterData.LowerCase, 2), 232 new CharacterRule(EnglishCharacterData.Digit, 2)); 233 PasswordGenerator passwordGenerator = new PasswordGenerator(); 234 String newPassword = passwordGenerator.generatePassword(15, charList); 235 log.info(\u0026#34;Password generated, Wont be printed!\u0026#34;); 236 var encoder = PasswordEncoderFactories.createDelegatingPasswordEncoder(); 237 String encodedPassword = encoder.encode(newPassword); 238 log.info(\u0026#34;Encoded Password {}\u0026#34;, encodedPassword); 239 customerService.longRunningJob(delay); 240 return encodedPassword; 241 } 242 243 /** 244 * Depending on the feature flag a different code will be executed. 
245 * Feature flag can be updated/refreshed while server is running 246 */ 247 @GetMapping(\u0026#34;/feature-job\u0026#34;) 248 public String featureJob() { 249 log.info(\u0026#34;featureJob request received\u0026#34;); 250 if (cloudConfig.getNewFeatureFlag()) { 251 return \u0026#34;Feature v2\u0026#34;; 252 } else { 253 return \u0026#34;Feature v1\u0026#34;; 254 } 255 } 256 257 @GetMapping(\u0026#34;/customer\u0026#34;) 258 public Iterable\u0026lt;Customer\u0026gt; findAllCustomer() { 259 log.info(\u0026#34;Finding All Customers!\u0026#34;); 260 return customerService.findAllCustomer(); 261 } 262 263 @PostMapping(\u0026#34;/customer\u0026#34;) 264 public Customer saveCustomer(@RequestBody @Valid Customer customer) { 265 log.info(\u0026#34;Saving Customer!\u0026#34;); 266 return customerService.saveCustomer(customer); 267 } 268 269 @GetMapping(\u0026#34;/customer-page\u0026#34;) 270 public Iterable\u0026lt;Customer\u0026gt; findAllCustomerByPage(Pageable pageable) { 271 log.info(\u0026#34;Finding All Customers By Page!\u0026#34;); 272 return customerService.findAllCustomerByPage(pageable); 273 } 274 275 @PutMapping(\u0026#34;/cache-put/{key}/{value}\u0026#34;) 276 public String cachePut(@PathVariable String key, @PathVariable String value) { 277 log.info(\u0026#34;cachePut request received\u0026#34;); 278 cache = cacheManager.getCache(\u0026#34;countryCache\u0026#34;); 279 cache.put(key, value); 280 return \u0026#34;done!\u0026#34;; 281 } 282 283 @GetMapping(\u0026#34;/cache-get/{key}\u0026#34;) 284 public String cacheGet(@PathVariable String key) { 285 log.info(\u0026#34;cacheGet request received\u0026#34;); 286 cache = cacheManager.getCache(\u0026#34;countryCache\u0026#34;); 287 return String.valueOf(cache.get(key).get()); 288 } 289 290 @GetMapping(\u0026#34;/error\u0026#34;) 291 public ResponseEntity\u0026lt;?\u0026gt; errorJob() { 292 log.info(\u0026#34;error request received\u0026#34;); 293 throw new RuntimeException(\u0026#34;My Custom Error\u0026#34;); 294 
} 295 296 @Operation(summary = \u0026#34;Greet Controller\u0026#34;) 297 @ApiResponses(value = { 298 @ApiResponse(responseCode = \u0026#34;200\u0026#34;, description = \u0026#34;Found User\u0026#34;, content = {@Content(mediaType = \u0026#34;application/json\u0026#34;, schema = @Schema(implementation = Greet.class))}), 299 @ApiResponse(responseCode = \u0026#34;400\u0026#34;, description = \u0026#34;Invalid User Provided\u0026#34;, content = @Content), 300 @ApiResponse(responseCode = \u0026#34;404\u0026#34;, description = \u0026#34;User Not Found\u0026#34;, content = @Content)}) 301 @GetMapping(\u0026#34;/greet/{name}\u0026#34;) 302 public ResponseEntity\u0026lt;Greet\u0026gt; greet(@PathVariable String name) { 303 if (name == null || name.isBlank()) { 304 return ResponseEntity.badRequest().build(); 305 } 306 if (name.equalsIgnoreCase(\u0026#34;unknown\u0026#34;)) { 307 return ResponseEntity.notFound().build(); 308 } 309 return ResponseEntity.ok(new Greet(\u0026#34;Hello \u0026#34; + name)); 310 } 311 312 @GetMapping(\u0026#34;/fetch/{city}\u0026#34;) 313 public List\u0026lt;Customer\u0026gt; getByCity(@PathVariable String city) { 314 log.info(\u0026#34;Fetching by city request received\u0026#34;); 315 return customerService.getByCity(city); 316 } 317 318 @AllArgsConstructor 319 @Data 320 class MyKey { 321 String key; 322 } 323 324 @AllArgsConstructor 325 @Data 326 class Greet { 327 String message; 328 } 329 330} 1package com.demo.project57.service; 2 3import java.time.LocalDateTime; 4import java.util.List; 5import java.util.concurrent.TimeUnit; 6import java.util.concurrent.atomic.AtomicLong; 7 8import com.demo.project57.domain.Customer; 9import com.demo.project57.exception.CustomerException; 10import com.demo.project57.repository.CustomerRepository; 11import io.github.resilience4j.retry.annotation.Retry; 12import lombok.RequiredArgsConstructor; 13import lombok.SneakyThrows; 14import lombok.extern.slf4j.Slf4j; 15import org.springframework.data.domain.Pageable; 
16import org.springframework.http.HttpStatusCode; 17import org.springframework.stereotype.Service; 18import org.springframework.transaction.annotation.Transactional; 19import org.springframework.web.client.HttpClientErrorException; 20 21@Service 22@RequiredArgsConstructor 23@Slf4j 24public class CustomerService { 25 26 private final CustomerRepository customerRepository; 27 private final CustomerAsyncService customerAsyncService; 28 AtomicLong counter = new AtomicLong(); 29 30 public Iterable\u0026lt;Customer\u0026gt; findAllCustomer() { 31 return customerRepository.findAll(); 32 } 33 34 public Iterable\u0026lt;Customer\u0026gt; findAllCustomerByPage(Pageable pageable) { 35 return customerRepository.findAll(pageable); 36 } 37 38 /** 39 * Will block till the db returns data 40 */ 41 public int getCustomerCount1(long delay) { 42 return customerRepository.getCustomerCount(delay); 43 } 44 45 /** 46 * Will time out after 5 seconds 47 */ 48 @Transactional(timeout = 5) 49 public int getCustomerCount2(long delay) { 50 return customerRepository.getCustomerCount(delay); 51 } 52 53 /** 54 * Will invoke db call from multiple threads 55 */ 56 public void invokeAsyncDbCall(int threads, long delay) { 57 for (int i = 0; i \u0026lt; threads; i++) { 58 //Query the DB \u0026#39;N\u0026#39; times 59 customerAsyncService.getCustomerCount(delay); 60 } 61 } 62 63 @SneakyThrows 64 public String longRunningJob(Long delay) { 65 log.info(\u0026#34;Long running job started!\u0026#34;); 66 TimeUnit.SECONDS.sleep(delay); 67 log.info(\u0026#34;Long running job completed!\u0026#34;); 68 return \u0026#34;Job completed @\u0026#34; + LocalDateTime.now(); 69 } 70 71 @Retry(name = \u0026#34;project57-y1\u0026#34;) 72 public String getTime() { 73 log.info(\u0026#34;Getting time from api!\u0026#34;); 74 //Simulating a failure first 2 times 75 if (counter.incrementAndGet() \u0026lt; 3) { 76 throw new HttpClientErrorException(HttpStatusCode.valueOf(500)); 77 } else { 78 counter = new AtomicLong(); 79 
return String.valueOf(LocalDateTime.now()); 80 } 81 } 82 83 public Customer saveCustomer(Customer customer) { 84 if (customer.getCity().equals(\u0026#34;unknown\u0026#34;)) { 85 throw new CustomerException(\u0026#34;Unknown city for customer!\u0026#34;); 86 } 87 return customerRepository.save(customer); 88 } 89 90 public List\u0026lt;Customer\u0026gt; getByCity(String city) { 91 return customerRepository.getByCity(city); 92 } 93} 1package com.demo.project57.service; 2 3import com.demo.project57.repository.CustomerRepository; 4import lombok.RequiredArgsConstructor; 5import lombok.extern.slf4j.Slf4j; 6import org.springframework.scheduling.annotation.Async; 7import org.springframework.scheduling.annotation.EnableAsync; 8import org.springframework.stereotype.Service; 9import org.springframework.transaction.annotation.Propagation; 10import org.springframework.transaction.annotation.Transactional; 11 12@Service 13@EnableAsync 14@RequiredArgsConstructor 15@Slf4j 16public class CustomerAsyncService { 17 private final CustomerRepository customerRepository; 18 19 /** 20 * Each method run in parallel causing connection pool to become full. 
21 * Explicitly creating many connections so we run out of connections 22 */ 23 @Transactional(propagation = Propagation.REQUIRES_NEW) 24 @Async 25 public void getCustomerCount(long delay) { 26 log.info(\u0026#34;getCustomerCount invoked!\u0026#34;); 27 int count = customerRepository.getCustomerCount(delay); 28 log.info(\u0026#34;getCustomerCount completed: {}\u0026#34;, count); 29 } 30 31} 1spring: 2 main: 3 banner-mode: \u0026#34;off\u0026#34; 4 lazy-initialization: false 5 datasource: 6 driver-class-name: org.postgresql.Driver 7 url: jdbc:postgresql://${POSTGRES_HOST}:5432/${POSTGRES_DB} 8 username: ${POSTGRES_USER} 9 password: ${POSTGRES_PASSWORD} 10 hikari: 11 maximumPoolSize: 5 12 connectionTimeout: 1000 13 idleTimeout: 60 14 maxLifetime: 180 15 jpa: 16 show-sql: false 17 hibernate.ddl-auto: none 18 database-platform: org.hibernate.dialect.PostgreSQLDialect 19 defer-datasource-initialization: false 20 properties: 21 hibernate: 22 show_sql: false 23 format_sql: true 24 open-in-view: false 25 threads: 26 virtual: 27 enabled: false 28 cloud: 29 config: 30 enabled: false 31 task: 32 execution: 33 simple: 34 concurrency-limit: 10 35 scheduling: 36 simple: 37 concurrency-limit: 10 38 mvc: 39 async: 40 request-timeout: 5000 41 liquibase: 42 change-log: db/changelog/db.changelog.yaml 43 enabled: true 44server: 45 http2: 46 enabled: false 47 port: 8080 48 compression: 49 enabled: true 50 # Minimum response when compression will kick in 51 min-response-size: 512 52 # Mime types that should be compressed 53 mime-types: text/xml, text/plain, application/json 54 tomcat: 55 connection-timeout: 500 56 threads: 57 max: 10 # Maximum amount of worker threads. 58 min-spare: 10 # Minimum amount of worker threads. 59 max-connections: 10 # Maximum number of connections that the server accepts and processes. 
60 max-keep-alive-requests: 10 61 keep-alive-timeout: 10 62 accept-count: 100 # Maximum queue size for incoming connection requests 63 error: 64 include-binding-errors: always 65 include-exception: false 66 include-message: always 67 include-path: always 68 include-stacktrace: never 69 70resilience4j: 71 timelimiter: 72 instances: 73 project57-t1: 74 timeoutDuration: 5s 75 cancelRunningFuture: true 76 metrics: 77 enabled: true 78 ratelimiter: 79 instances: 80 project57-r1: 81 limit-for-period: 5 82 limit-refresh-period: 1s 83 timeout-duration: 0s 84 metrics: 85 enabled: true 86 bulkhead: 87 instances: 88 project57-b1: 89 max-concurrent-calls: 2 90 max-wait-duration: 10ms 91 metrics: 92 enabled: true 93 retry: 94 instances: 95 project57-y1: 96 max-attempts: 3 97 waitDuration: 10s 98 enableExponentialBackoff: true 99 exponentialBackoffMultiplier: 2 100 retryExceptions: 101 - org.springframework.web.client.HttpClientErrorException 102 ignoreExceptions: 103 - org.springframework.web.client.HttpServerErrorException 104 metrics: 105 enabled: true 106 circuitbreaker: 107 instances: 108 project57-c1: 109 failure-rate-threshold: 50 110 minimum-number-of-calls: 5 111 automatic-transition-from-open-to-half-open-enabled: true 112 wait-duration-in-open-state: 5s 113 permitted-number-of-calls-in-half-open-state: 3 114 sliding-window-size: 10 115 sliding-window-type: count_based 116 metrics: 117 enabled: true 118 119logging: 120 pattern: 121 level: \u0026#39;%5p [${HOSTNAME:}]\u0026#39; 122 level: 123 root: info 124 org.hibernate.SQL: DEBUG 125 org.hibernate.type.descriptor.sql.BasicBinder: TRACE 126 org.hibernate.orm.jdbc.bind: TRACE 127 file: 128 name: logs/project57-app-${HOSTNAME}.log 129 logback: 130 rollingpolicy: 131 file-name-pattern: logs/%d{yyyy-MM, aux}/project57-app-${HOSTNAME}.%d{yyyy-MM-dd}.%i.log 132 max-file-size: 100MB 133 total-size-cap: 10GB 134 max-history: 10 135 136management: 137 endpoint: 138 refresh: 139 enabled: true 140 health: 141 enabled: true 142 
show-details: always 143 show-components: always 144 metrics: 145 enabled: true 146 info: 147 env: 148 enabled: true 149 enabled: true 150 env: 151 post: 152 enabled: true 153 endpoints: 154 web: 155 exposure: 156 include: \u0026#39;*\u0026#39; 157 158project57: 159 newFeatureFlag: false Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project 57 2 3Distributed System Essentials 4 5[https://gitorko.github.io/distributed-system-essentials/](https://gitorko.github.io/distributed-system-essentials/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 21.0.3 2024-04-16 LTS 14``` 15 16### Postgres DB 17 18```bash 19docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:14 20docker ps 21docker exec -it pg-container psql -U postgres -W postgres 22CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 23CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 24grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 25 26docker stop pg-container 27docker start pg-container 28``` 29 30To run postgres with `pg_hint_plan` 31 32```bash 33docker build --no-cache -t my-postgres-image -f docker/Dockerfile . 34docker run -p 5432:5432 --name my-postgres-container -e POSTGRES_PASSWORD=mysecretpassword -d my-postgres-image 35docker exec -it my-postgres-container psql -U postgres -W postgres 36CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 37CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 38grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 39 40CREATE EXTENSION pg_hint_plan; 41``` 42 43### Dev 44 45To run the backend in dev mode. 
46 47```bash 48./gradlew clean build 49./gradlew bootRun 50 51``` 52 53Command to check port on Mac 54 55```bash 56lsof -i tcp:8080 57``` 58 59### Kubernetes 60 61Stop any existing postgres db 62 63```bash 64docker stop pg-container 65brew services stop postgresql@14 66``` 67 68```bash 69kubectl config use-context docker-desktop 70 71mkdir /tmp/data 72 73./gradlew clean build 74docker build -f k8s/Dockerfile --force-rm -t project57:1.0.0 . 75kubectl apply -f k8s/deployment.yaml 76kubectl get pods -w 77 78kubectl logs -f service/project57-service 79 80kubectl delete -f k8s/deployment.yaml 81``` 82 83To build a small docker image 84 85```bash 86docker build -f k8s/Dockerfile-Small --force-rm -t project57:1.0.0 . 87docker run -d -p 8080:8080 -e POSTGRES_HOST=\u0026#34;10.177.140.150\u0026#34; -e POSTGRES_DB=\u0026#34;test-db\u0026#34; -e POSTGRES_USER=\u0026#34;test\u0026#34; -e POSTGRES_PASSWORD=\u0026#34;test@123\u0026#34; project57:1.0.0 88``` 89 90### Swagger 91 92[http://localhost:8080/swagger-ui/index.html](http://localhost:8080/swagger-ui/index.html) 93 94[http://localhost:8080/v3/api-docs](http://localhost:8080/v3/api-docs) References https://resilience4j.readme.io/docs\nhttps://www.fluentd.org/\n","link":"https://gitorko.github.io/post/distributed-system-essentials/","section":"post","tags":["fail-fast","resilience4j","kubernetes","spring","postgres","bulkhead","rate-limit","circuit-breaker","spring-boot","indexing"],"title":"Best Practices for Building Distributed 
Systems"},{"body":"","link":"https://gitorko.github.io/tags/bulkhead/","section":"tags","tags":null,"title":"Bulkhead"},{"body":"","link":"https://gitorko.github.io/tags/circuit-breaker/","section":"tags","tags":null,"title":"Circuit-Breaker"},{"body":"","link":"https://gitorko.github.io/categories/distributed-system/","section":"categories","tags":null,"title":"Distributed-System"},{"body":"","link":"https://gitorko.github.io/tags/fail-fast/","section":"tags","tags":null,"title":"Fail-Fast"},{"body":"","link":"https://gitorko.github.io/tags/indexing/","section":"tags","tags":null,"title":"Indexing"},{"body":"","link":"https://gitorko.github.io/tags/kubernetes/","section":"tags","tags":null,"title":"Kubernetes"},{"body":"","link":"https://gitorko.github.io/tags/rate-limit/","section":"tags","tags":null,"title":"Rate-Limit"},{"body":"","link":"https://gitorko.github.io/tags/resilience4j/","section":"tags","tags":null,"title":"Resilience4j"},{"body":"","link":"https://gitorko.github.io/tags/csv/","section":"tags","tags":null,"title":"Csv"},{"body":"","link":"https://gitorko.github.io/tags/flow/","section":"tags","tags":null,"title":"Flow"},{"body":"","link":"https://gitorko.github.io/tags/job/","section":"tags","tags":null,"title":"Job"},{"body":"","link":"https://gitorko.github.io/tags/jpa/","section":"tags","tags":null,"title":"Jpa"},{"body":"","link":"https://gitorko.github.io/tags/orchestration/","section":"tags","tags":null,"title":"Orchestration"},{"body":"","link":"https://gitorko.github.io/tags/retry/","section":"tags","tags":null,"title":"Retry"},{"body":"Spring boot implementation of multi level job workflow using spring batch\nGithub: https://github.com/gitorko/project67\nSpring Batch Spring batch is typically used to process data (read-process-write) in the background. 
Here we will use it as a workflow orchestration engine to execute jobs that take long time to complete for non-batch oriented flow.\nAs an example we take a travel booking flow, where a customer books flight, hotel, cab in a single click but the actual bookings are done in the background flow. We can use an event based model but the challenges with event model is that it's difficult to track a specific job \u0026amp; restart a specific job. It's difficult to view the event queues and see which jobs are stuck. We might even have to use priority queue to move some event ahead of the queue if the queue has a huge backlog. If there are various steps involved we might have different queues for each, this will allow restart of steps if they fail.\nJobBuilder - Create a Job. StepBuilder - Creates a Step which is part of a job. A step can perform a chunk-oriented task, tasklet, or any other processing logic. JobStepBuilder - Creates a Step that encapsulates a job within a step. This allows you to run an entire job as a single step within another job Nested Job. FlowBuilder - Defines a Flow of steps that can be executed within a job. A Flow can contain multiple steps, decision points, and other nested flows. A Flow can be part of multiple jobs or nested within other flows. FlowJobBuilder - Defines a FlowJob that executes a flow. Wraps a Flow into a Job, allowing the Flow to be executed as part of a job Nested Flow. The RunIdIncrementer provides a mechanism to generate a unique job parameter (run.id) for each execution of a job. This is needed for re-running jobs with the same configuration and parameters multiple times without conflicts.\nSo we will use spring batch which provides Job \u0026amp; Step flow to orchestrate our booking flow.\nFeatures:\nRetry will be attempted in case of failure with transactional rollback. 
Jobs can be restarted You can write if-else flows in the logic After you run the code you see the jobs complete.\n1curl --location \u0026#39;http://localhost:8080/book-travel\u0026#39; \\ 2--header \u0026#39;Content-Type: application/json\u0026#39; \\ 3--data \u0026#39;{ 4 \u0026#34;customer\u0026#34;: \u0026#34;ryan\u0026#34; 5}\u0026#39; You can also run a batch job that reads a csv and writes it to a csv file and db.\n1curl --location --request POST \u0026#39;http://localhost:8080/employee-batch-job\u0026#39; \\ 2--data \u0026#39;\u0026#39; Code 1package com.demo.project67.task; 2 3import java.time.LocalDateTime; 4 5import com.demo.project67.domain.BookingEvent; 6import com.demo.project67.repository.BookingEventRepository; 7import com.demo.project67.service.HelperUtil; 8import lombok.RequiredArgsConstructor; 9import lombok.extern.slf4j.Slf4j; 10import org.springframework.batch.core.StepContribution; 11import org.springframework.batch.core.scope.context.ChunkContext; 12import org.springframework.batch.core.step.tasklet.Tasklet; 13import org.springframework.batch.repeat.RepeatStatus; 14import org.springframework.stereotype.Service; 15import org.springframework.transaction.annotation.Transactional; 16 17@Service 18@Slf4j 19@RequiredArgsConstructor 20public class BookCabTask { 21 22 final BookingEventRepository bookingEventRepository; 23 24 @Transactional 25 public Tasklet bookCab() { 26 return new Tasklet() { 27 @Override 28 public RepeatStatus execute(StepContribution contribution, ChunkContext chunkContext) throws Exception { 29 Long bookingId = (Long) chunkContext.getStepContext() 30 .getJobParameters() 31 .get(\u0026#34;bookingId\u0026#34;); 32 log.info(\u0026#34;Running bookCab, bookingId: {}\u0026#34;, bookingId); 33 String customer = (String) chunkContext.getStepContext() 34 .getJobParameters() 35 .get(\u0026#34;customer\u0026#34;); 36 bookingEventRepository.save( 37 BookingEvent.builder() 38 .event(\u0026#34;Cab Booked for customer \u0026#34; + customer) 39 
.bookingId(bookingId) 40 .createdOn(LocalDateTime.now()) 41 .build() 42 ); 43 HelperUtil.delay(10); 44 log.info(\u0026#34;Completed bookCab, bookingId: {}\u0026#34;, bookingId); 45 return RepeatStatus.FINISHED; 46 } 47 }; 48 } 49} 1package com.demo.project67.task; 2 3import java.time.LocalDateTime; 4 5import com.demo.project67.domain.BookingEvent; 6import com.demo.project67.repository.BookingEventRepository; 7import com.demo.project67.service.HelperUtil; 8import lombok.RequiredArgsConstructor; 9import lombok.extern.slf4j.Slf4j; 10import org.springframework.batch.core.step.tasklet.Tasklet; 11import org.springframework.batch.repeat.RepeatStatus; 12import org.springframework.stereotype.Service; 13 14@Service 15@Slf4j 16@RequiredArgsConstructor 17public class BookFlightTask { 18 final BookingEventRepository bookingEventRepository; 19 20 public Tasklet bookFlight() { 21 return (contribution, chunkContext) -\u0026gt; { 22 Long bookingId = (Long) chunkContext.getStepContext() 23 .getJobParameters() 24 .get(\u0026#34;bookingId\u0026#34;); 25 log.info(\u0026#34;Running bookFlight, bookingId: {}\u0026#34;, bookingId); 26 String customer = (String) chunkContext.getStepContext() 27 .getJobParameters() 28 .get(\u0026#34;customer\u0026#34;); 29 bookingEventRepository.save( 30 BookingEvent.builder() 31 .event(\u0026#34;Flight Booked for customer \u0026#34; + customer) 32 .bookingId(bookingId) 33 .createdOn(LocalDateTime.now()) 34 .build() 35 ); 36 HelperUtil.delay(10); 37 log.info(\u0026#34;Completed bookFlight, bookingId: {}\u0026#34;, bookingId); 38 return RepeatStatus.FINISHED; 39 }; 40 } 41} 1package com.demo.project67.task; 2 3import java.time.LocalDateTime; 4import java.util.concurrent.atomic.AtomicInteger; 5 6import com.demo.project67.domain.BookingEvent; 7import com.demo.project67.repository.BookingEventRepository; 8import com.demo.project67.service.HelperUtil; 9import lombok.RequiredArgsConstructor; 10import lombok.extern.slf4j.Slf4j; 11import 
org.springframework.batch.core.step.tasklet.Tasklet; 12import org.springframework.batch.repeat.RepeatStatus; 13import org.springframework.stereotype.Service; 14import org.springframework.transaction.annotation.Transactional; 15 16@Service 17@Slf4j 18@RequiredArgsConstructor 19public class FlightNotificationTask { 20 final BookingEventRepository bookingEventRepository; 21 AtomicInteger attemptCounter = new AtomicInteger(); 22 23 /** 24 * Retry in flow 25 */ 26 @Transactional 27 public Tasklet sendingFlightNotificationTask() { 28 return (contribution, chunkContext) -\u0026gt; { 29 Long bookingId = (Long) chunkContext.getStepContext() 30 .getJobParameters() 31 .get(\u0026#34;bookingId\u0026#34;); 32 log.info(\u0026#34;Running sendingFlightNotificationTask, bookingId: {}, Attempt: {}\u0026#34;, bookingId, attemptCounter.get()); 33 String customer = (String) chunkContext.getStepContext() 34 .getJobParameters() 35 .get(\u0026#34;customer\u0026#34;); 36 bookingEventRepository.save( 37 BookingEvent.builder() 38 .event(\u0026#34;Flight Notification Sent to customer \u0026#34; + customer + \u0026#34;, Attempt: \u0026#34; + attemptCounter.get()) 39 .bookingId(bookingId) 40 .createdOn(LocalDateTime.now()) 41 .build() 42 ); 43 log.info(\u0026#34;sendingFlightNotificationTask, bookingId: {}, Attempt: {}\u0026#34;, bookingId, attemptCounter.get()); 44 HelperUtil.delay(10); 45 //Simulate error for first 2 attempts 46 if (attemptCounter.incrementAndGet() \u0026lt; 3) { 47 log.error(\u0026#34;Failed to send flight notification!\u0026#34;); 48 throw new RuntimeException(\u0026#34;Failed to send flight notification!\u0026#34;); 49 } 50 log.info(\u0026#34;Completed sendingFlightNotificationTask, bookingId: {}, Attempt: {}\u0026#34;, bookingId, attemptCounter.get()); 51 return RepeatStatus.FINISHED; 52 }; 53 } 54} 1package com.demo.project67.workflow; 2 3import com.demo.project67.exception.NotificationExceptionHandler; 4import com.demo.project67.task.FlightNotificationTask; 5import 
com.demo.project67.task.HotelNotificationTask; 6import lombok.RequiredArgsConstructor; 7import lombok.extern.slf4j.Slf4j; 8import org.springframework.batch.core.BatchStatus; 9import org.springframework.batch.core.Job; 10import org.springframework.batch.core.Step; 11import org.springframework.batch.core.job.builder.JobBuilder; 12import org.springframework.batch.core.job.flow.FlowExecutionStatus; 13import org.springframework.batch.core.job.flow.JobExecutionDecider; 14import org.springframework.batch.core.repository.JobRepository; 15import org.springframework.batch.core.step.builder.JobStepBuilder; 16import org.springframework.batch.core.step.builder.StepBuilder; 17import org.springframework.context.annotation.Bean; 18import org.springframework.context.annotation.Configuration; 19import org.springframework.transaction.PlatformTransactionManager; 20 21/** 22 * Shows 2 different way of doing retry 23 * 24 * 2 Jobs with 1 Step each. 25 */ 26@Configuration 27@RequiredArgsConstructor 28@Slf4j 29public class NotificationWorkflow { 30 31 final JobRepository jobRepository; 32 final PlatformTransactionManager transactionManager; 33 final HotelNotificationTask hotelNotificationTask; 34 final FlightNotificationTask flightNotificationTask; 35 final NotificationExceptionHandler notificationExceptionHandler; 36 37 @Bean(name = \u0026#34;notificationStartJob\u0026#34;) 38 public Job notificationStartJob(Job sendFlightNotificationJob, Job sendHotelNotificationJob) { 39 return new JobBuilder(\u0026#34;notificationStartJob\u0026#34;, jobRepository) 40 .start(sendFlightNotificationJobStep(sendFlightNotificationJob)) 41 .next(sendHotelNotificationJobStep(sendHotelNotificationJob)) 42 .build(); 43 } 44 45 private Step sendFlightNotificationJobStep(Job sendFlightNotificationJob) { 46 return new JobStepBuilder(new StepBuilder(\u0026#34;sendFlightNotificationJobStep\u0026#34;, jobRepository)) 47 .job(sendFlightNotificationJob) 48 .build(); 49 } 50 51 private Step 
sendHotelNotificationJobStep(Job sendHotelNotificationJob) { 52 return new JobStepBuilder(new StepBuilder(\u0026#34;sendHotelNotificationJobStep\u0026#34;, jobRepository)) 53 .job(sendHotelNotificationJob) 54 .build(); 55 } 56 57 @Bean(name = \u0026#34;sendFlightNotificationJob\u0026#34;) 58 public Job sendFlightNotificationJob(Step sendFlightNotificationStep) { 59 return new JobBuilder(\u0026#34;sendFlightNotificationJob\u0026#34;, jobRepository) 60 .start(sendFlightNotificationStep) 61 .next(retryDecider()) 62 .from(retryDecider()).on(\u0026#34;RETRY\u0026#34;).to(sendFlightNotificationStep) 63 .from(retryDecider()).on(\u0026#34;COMPLETED\u0026#34;).end() 64 .end() 65 .build(); 66 } 67 68 @Bean(name = \u0026#34;sendHotelNotificationJob\u0026#34;) 69 public Job sendHotelNotificationJob(Step sendHotelNotificationStep) { 70 return new JobBuilder(\u0026#34;sendHotelNotificationJob\u0026#34;, jobRepository) 71 .start(sendHotelNotificationStep) 72 .build(); 73 } 74 75 @Bean(name = \u0026#34;sendFlightNotificationStep\u0026#34;) 76 public Step sendNotificationStep() { 77 return new StepBuilder(\u0026#34;sendFlightNotificationStep\u0026#34;, jobRepository) 78 .tasklet(flightNotificationTask.sendingFlightNotificationTask(), transactionManager) 79 .exceptionHandler(notificationExceptionHandler) 80 .build(); 81 } 82 83 @Bean(name = \u0026#34;sendHotelNotificationStep\u0026#34;) 84 public Step sendHotelNotificationStep() { 85 return new StepBuilder(\u0026#34;sendHotelNotificationStep\u0026#34;, jobRepository) 86 .tasklet(hotelNotificationTask.sendHotelNotificationTask(), transactionManager) 87 .exceptionHandler(notificationExceptionHandler) 88 .build(); 89 } 90 91 private JobExecutionDecider retryDecider() { 92 return (jobExecution, stepExecution) -\u0026gt; { 93 if (stepExecution.getStatus() == BatchStatus.FAILED) { 94 return new FlowExecutionStatus(\u0026#34;RETRY\u0026#34;); 95 } 96 return new FlowExecutionStatus(\u0026#34;COMPLETED\u0026#34;); 97 }; 98 } 99} Postman 
Import the postman collection to postman\nPostman Collection\nSetup 1# Project 67 2 3Spring Batch - Multi Stage Job Orchestration 4 5[https://gitorko.github.io/post/spring-batch-orchestration](https://gitorko.github.io/post/spring-batch-orchestration) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 21.0.3 2024-04-16 LTS 14``` 15 16### Postgres DB 17 18```bash 19docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:14 20docker ps 21docker exec -it pg-container psql -U postgres -W postgres 22CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 23CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 24grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 25 26docker stop pg-container 27docker start pg-container 28``` 29 30### Dev 31 32To run the backend in dev mode. 33 34```bash 35./gradlew clean build 36./gradlew bootRun 37 38``` 39 References https://spring.io/projects/spring-batch\n","link":"https://gitorko.github.io/post/spring-batch-orchestration/","section":"post","tags":["spring","spring-boot","retry","orchestration","workflow","postgres","flow","tasklet","job","step","csv","jpa"],"title":"Spring Batch - Multi Stage Job Orchestration"},{"body":"","link":"https://gitorko.github.io/categories/springbatch/","section":"categories","tags":null,"title":"SpringBatch"},{"body":"","link":"https://gitorko.github.io/tags/step/","section":"tags","tags":null,"title":"Step"},{"body":"","link":"https://gitorko.github.io/tags/tasklet/","section":"tags","tags":null,"title":"Tasklet"},{"body":"","link":"https://gitorko.github.io/tags/workflow/","section":"tags","tags":null,"title":"Workflow"},{"body":"","link":"https://gitorko.github.io/tags/rsocket/","section":"tags","tags":null,"title":"Rsocket"},{"body":"","link":"https://gitorko.github.io/categories/rsocket/","section":"categories","tags":null,"title":"Rsocket"},{"body":"Spring boot 
client server application with rsocket\nGithub: https://github.com/gitorko/project02\nRsocket RSocket is a binary \u0026amp; message passing protocol for multiplexed, duplex communication over TCP, WebSocket, and other byte stream transports.\nInteraction Models\nType Description Request-Response send one message and receive one back Request-Stream send one message and receive a stream of messages back Channel send streams of messages in both directions Fire-and-Forget send a one-way message Key features of RSocket protocol\nReactive Streams - back pressure allows a requester to slow down a responder at the source, hence reducing reliance on network layer congestion control, and the need for buffering at the network level or at any level. Request throttling - \u0026quot;Leasing\u0026quot; after the LEASE frame that can be sent from each end to limit the total number of requests allowed by other end for a given time. Leases are renewed periodically. Session resumption - loss of connectivity and requires some state to be maintained. Fragmentation - re-assembly of large messages. Keepalive - heartbeats. 
Differences\nRSocket GRPC Rest Binary Protocol (TCP, a File, WebSockets) Works on HTTP2 (Protocol Buffers) Works on HTTP/1.1 Works on 5/6 layer of OSI model Works on 7 layer of OSI model Support Back pressure handling Code 1package com.demo.project02.rserver.controller; 2 3import java.time.Duration; 4import java.time.Instant; 5import java.util.stream.Stream; 6 7import com.demo.project02.rcommon.GreetingRequest; 8import com.demo.project02.rcommon.GreetingResponse; 9import org.springframework.messaging.handler.annotation.MessageMapping; 10import org.springframework.stereotype.Controller; 11import reactor.core.publisher.Flux; 12 13@Controller 14public class GreetingController { 15 16 /** 17 * Fire-Forget - No response 18 * Request-Response - Single value comes in, single value returned 19 * Request-Stream - Single value comes in, multiple values returned 20 * Channel - Multiple values comes in, multiple values returned. 21 */ 22 @MessageMapping(\u0026#34;greetings\u0026#34;) 23 Flux\u0026lt;GreetingResponse\u0026gt; greet(GreetingRequest greetingRequest) { 24 var stream = Stream.generate(() -\u0026gt; new GreetingResponse(\u0026#34;Hello \u0026#34; + greetingRequest.getName() + \u0026#34; @ \u0026#34; + Instant.now())); 25 return Flux.fromStream(stream) 26 .delayElements(Duration.ofSeconds(1)); 27 28 } 29} 1package com.demo.project02.rclient; 2 3import java.time.Duration; 4 5import com.demo.project02.rcommon.GreetingRequest; 6import com.demo.project02.rcommon.GreetingResponse; 7import lombok.SneakyThrows; 8import lombok.extern.slf4j.Slf4j; 9import org.springframework.boot.CommandLineRunner; 10import org.springframework.boot.SpringApplication; 11import org.springframework.boot.autoconfigure.SpringBootApplication; 12import org.springframework.boot.context.event.ApplicationReadyEvent; 13import org.springframework.context.ApplicationListener; 14import org.springframework.context.annotation.Bean; 15import org.springframework.messaging.rsocket.RSocketRequester; 16import 
reactor.core.publisher.Mono; 17import reactor.util.retry.Retry; 18 19@SpringBootApplication 20@Slf4j 21public class RclientApp { 22 23 @SneakyThrows 24 public static void main(String[] args) { 25 SpringApplication.run(RclientApp.class, args); 26 System.in.read(); 27 } 28 29 @Bean 30 public CommandLineRunner onStart() { 31 return (args) -\u0026gt; { 32 log.info(\u0026#34;On Start!\u0026#34;); 33 }; 34 } 35 36 @Bean 37 Mono\u0026lt;RSocketRequester\u0026gt; rSocketRequester(RSocketRequester.Builder builder) { 38 return builder 39 .rsocketConnector(connector -\u0026gt; connector 40 .reconnect(Retry.fixedDelay(Integer.MAX_VALUE, Duration.ofSeconds(1)))) 41 .connectTcp(\u0026#34;localhost\u0026#34;, 8888); 42 } 43 44 @Bean 45 ApplicationListener\u0026lt;ApplicationReadyEvent\u0026gt; client(Mono\u0026lt;RSocketRequester\u0026gt; client) { 46 return (args) -\u0026gt; { 47 var greetingResponseFlux = client.flatMapMany(rSocketRequester -\u0026gt; { 48 return rSocketRequester.route(\u0026#34;greetings\u0026#34;) 49 .data(new GreetingRequest(\u0026#34;Jack\u0026#34;)) 50 .retrieveFlux(GreetingResponse.class); 51 }); 52 greetingResponseFlux.subscribe(System.out::println); 53 }; 54 } 55} Setup 1# Project 02 2 3Spring - Rsocket 4 5[https://gitorko.github.io/spring-rsocket/](https://gitorko.github.io/spring-rsocket/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 21.0.3 2024-04-16 LTS 14``` 15 16### Dev 17 18To run the code. 
19 20```bash 21./gradlew clean build 22 23java -jar rserver/build/libs/rserver-1.0.0.jar 24java -jar rclient/build/libs/rclient-1.0.0.jar 25 26./gradlew :rserver:build 27./gradlew :rclient:build 28./gradlew :rcommon:build 29 30./gradlew :rserver:bootRun 31./gradlew :rclient:bootRun 32 33./gradlew bootJar 34``` References https://docs.spring.io/spring-framework/reference/rsocket.html\nhttps://medium.com/netifi/differences-between-grpc-and-rsocket-e736c954e60\n","link":"https://gitorko.github.io/post/spring-rsocket/","section":"post","tags":["rsocket","spring"],"title":"Spring - Rsocket"},{"body":"","link":"https://gitorko.github.io/categories/apache-ignite/","section":"categories","tags":null,"title":"Apache-Ignite"},{"body":"","link":"https://gitorko.github.io/tags/caching/","section":"tags","tags":null,"title":"Caching"},{"body":"Spring boot application with distributed locking using apache ignite\nGithub: https://github.com/gitorko/project04\nApache Ignite Apache Ignite is a distributed database. 
It supports distributed locking mechanism.\nCode 1package com.demo.project04.config; 2 3import java.util.ArrayList; 4import java.util.Collections; 5import java.util.List; 6 7import org.apache.ignite.Ignite; 8import org.apache.ignite.Ignition; 9import org.apache.ignite.cache.CacheAtomicityMode; 10import org.apache.ignite.configuration.CacheConfiguration; 11import org.apache.ignite.configuration.DataPageEvictionMode; 12import org.apache.ignite.configuration.DataRegionConfiguration; 13import org.apache.ignite.configuration.DataStorageConfiguration; 14import org.apache.ignite.configuration.IgniteConfiguration; 15import org.apache.ignite.kubernetes.configuration.KubernetesConnectionConfiguration; 16import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi; 17import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; 18import org.apache.ignite.spi.discovery.tcp.ipfinder.kubernetes.TcpDiscoveryKubernetesIpFinder; 19import org.apache.ignite.spi.discovery.tcp.ipfinder.multicast.TcpDiscoveryMulticastIpFinder; 20import org.springframework.beans.factory.annotation.Value; 21import org.springframework.context.annotation.Bean; 22import org.springframework.context.annotation.Configuration; 23 24@Configuration 25public class IgniteConfig { 26 27 /** 28 * Override the node name for each instance at start using properties 29 */ 30 @Value(\u0026#34;${ignite.nodeName:node0}\u0026#34;) 31 private String nodeName; 32 33 @Value(\u0026#34;${ignite.kubernetes.enabled:false}\u0026#34;) 34 private Boolean k8sEnabled; 35 36 private String k8sApiServer = \u0026#34;https://kubernetes.docker.internal:6443\u0026#34;; 37 private String k8sServiceName = \u0026#34;project04\u0026#34;; 38 private String k8sNameSpace = \u0026#34;default\u0026#34;; 39 40 @Bean(name = \u0026#34;igniteInstance\u0026#34;) 41 public Ignite igniteInstance() { 42 Ignite ignite = Ignition.start(igniteConfiguration()); 43 return ignite; 44 } 45 46 @Bean(name = \u0026#34;igniteConfiguration\u0026#34;) 47 public 
IgniteConfiguration igniteConfiguration() { 48 IgniteConfiguration cfg = new IgniteConfiguration(); 49 /** 50 * Uniquely identify node in a cluster use consistent Id. 51 */ 52 cfg.setConsistentId(nodeName); 53 54 cfg.setIgniteInstanceName(\u0026#34;my-ignite-instance\u0026#34;); 55 cfg.setPeerClassLoadingEnabled(true); 56 cfg.setLocalHost(\u0026#34;127.0.0.1\u0026#34;); 57 cfg.setMetricsLogFrequency(0); 58 59 cfg.setCommunicationSpi(tcpCommunicationSpi()); 60 if (k8sEnabled) { 61 cfg.setDiscoverySpi(tcpDiscoverySpiKubernetes()); 62 } else { 63 cfg.setDiscoverySpi(tcpDiscovery()); 64 } 65 cfg.setDataStorageConfiguration(dataStorageConfiguration()); 66 cfg.setCacheConfiguration(cacheConfiguration()); 67 return cfg; 68 } 69 70 @Bean(name = \u0026#34;cacheConfiguration\u0026#34;) 71 public CacheConfiguration[] cacheConfiguration() { 72 List\u0026lt;CacheConfiguration\u0026gt; cacheConfigurations = new ArrayList\u0026lt;\u0026gt;(); 73 cacheConfigurations.add(getLockCacheConfig()); 74 return cacheConfigurations.toArray(new CacheConfiguration[cacheConfigurations.size()]); 75 } 76 77 private CacheConfiguration getLockCacheConfig() { 78 /** 79 * Country cache to store key value pair 80 */ 81 CacheConfiguration cacheConfig = new CacheConfiguration(\u0026#34;lock-cache\u0026#34;); 82 /** 83 * This cache will be stored in non-persistent data region 84 */ 85 cacheConfig.setDataRegionName(\u0026#34;my-data-region\u0026#34;); 86 /** 87 * Needs to be transactional for getting distributed lock 88 */ 89 cacheConfig.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL); 90 return cacheConfig; 91 } 92 93 /** 94 * Nodes discover each other over this port 95 */ 96 private TcpDiscoverySpi tcpDiscovery() { 97 TcpDiscoverySpi tcpDiscoverySpi = new TcpDiscoverySpi(); 98 TcpDiscoveryMulticastIpFinder ipFinder = new TcpDiscoveryMulticastIpFinder(); 99 ipFinder.setAddresses(Collections.singletonList(\u0026#34;127.0.0.1:47500..47509\u0026#34;)); 100 tcpDiscoverySpi.setIpFinder(ipFinder); 101 
tcpDiscoverySpi.setLocalPort(47500); 102 // Changing local port range. This is an optional action. 103 tcpDiscoverySpi.setLocalPortRange(9); 104 //tcpDiscoverySpi.setLocalAddress(\u0026#34;localhost\u0026#34;); 105 return tcpDiscoverySpi; 106 } 107 108 private TcpDiscoverySpi tcpDiscoverySpiKubernetes() { 109 TcpDiscoverySpi spi = new TcpDiscoverySpi(); 110 KubernetesConnectionConfiguration kcfg = new KubernetesConnectionConfiguration(); 111 kcfg.setNamespace(k8sNameSpace); 112 kcfg.setMasterUrl(k8sApiServer); 113 TcpDiscoveryKubernetesIpFinder ipFinder = new TcpDiscoveryKubernetesIpFinder(kcfg); 114 ipFinder.setServiceName(k8sServiceName); 115 spi.setIpFinder(ipFinder); 116 return spi; 117 } 118 119 /** 120 * Nodes communicate with each other over this port 121 */ 122 private TcpCommunicationSpi tcpCommunicationSpi() { 123 TcpCommunicationSpi communicationSpi = new TcpCommunicationSpi(); 124 communicationSpi.setMessageQueueLimit(1024); 125 communicationSpi.setLocalAddress(\u0026#34;localhost\u0026#34;); 126 communicationSpi.setLocalPort(48100); 127 communicationSpi.setSlowClientQueueLimit(1000); 128 return communicationSpi; 129 } 130 131 private DataStorageConfiguration dataStorageConfiguration() { 132 DataStorageConfiguration dsc = new DataStorageConfiguration(); 133 DataRegionConfiguration defaultRegionCfg = new DataRegionConfiguration(); 134 DataRegionConfiguration regionCfg = new DataRegionConfiguration(); 135 136 defaultRegionCfg.setName(\u0026#34;default-data-region\u0026#34;); 137 defaultRegionCfg.setInitialSize(10 * 1024 * 1024); //10MB 138 defaultRegionCfg.setMaxSize(50 * 1024 * 1024); //50MB 139 defaultRegionCfg.setPersistenceEnabled(false); 140 defaultRegionCfg.setPageEvictionMode(DataPageEvictionMode.RANDOM_LRU); 141 142 regionCfg.setName(\u0026#34;my-data-region\u0026#34;); 143 regionCfg.setInitialSize(10 * 1024 * 1024); //10MB 144 regionCfg.setMaxSize(50 * 1024 * 1024); //50MB 145 regionCfg.setPersistenceEnabled(false); 146 147 
dsc.setDefaultDataRegionConfiguration(defaultRegionCfg); 148 dsc.setDataRegionConfigurations(regionCfg); 149 150 return dsc; 151 } 152 153} 1package com.demo.project04.service; 2 3import java.time.Instant; 4import java.util.concurrent.TimeUnit; 5import java.util.concurrent.locks.Lock; 6 7import jakarta.annotation.PostConstruct; 8import lombok.RequiredArgsConstructor; 9import lombok.SneakyThrows; 10import lombok.extern.slf4j.Slf4j; 11import org.apache.ignite.Ignite; 12import org.apache.ignite.IgniteCache; 13import org.springframework.beans.factory.annotation.Value; 14import org.springframework.stereotype.Service; 15 16/** 17 * Interact with Ignite as key-value store (non-persistent store) 18 */ 19@Service 20@RequiredArgsConstructor 21@Slf4j 22public class LockService { 23 24 final Ignite ignite; 25 IgniteCache\u0026lt;String, String\u0026gt; cache; 26 27 @Value(\u0026#34;${ignite.nodeName:node0}\u0026#34;) 28 private String nodeName; 29 30 @PostConstruct 31 public void postInit() { 32 cache = ignite.cache(\u0026#34;lock-cache\u0026#34;); 33 } 34 35 public String runJob(Integer seconds) { 36 log.info(\u0026#34;Acquiring Lock by {}\u0026#34;, nodeName); 37 Lock myLock = cache.lock(\u0026#34;lock-01\u0026#34;); 38 try { 39 myLock.lock(); 40 return executeTask(seconds); 41 } finally { 42 myLock.unlock(); 43 } 44 } 45 46 @SneakyThrows 47 private String executeTask(Integer seconds) { 48 log.info(\u0026#34;Starting job by {}\u0026#34;, nodeName); 49 log.info(\u0026#34;Sleeping for {} secs\u0026#34;, seconds); 50 TimeUnit.SECONDS.sleep(seconds); 51 log.info(\u0026#34;Finished job by {}\u0026#34;, nodeName); 52 return \u0026#34;Job completed by \u0026#34; + nodeName + \u0026#34; @ \u0026#34; + Instant.now(); 53 } 54 55} 1apiVersion: apps/v1 2kind: StatefulSet 3metadata: 4 name: project04 5spec: 6 selector: 7 matchLabels: 8 app: project04 9 serviceName: \u0026#34;project04\u0026#34; 10 replicas: 1 11 template: 12 metadata: 13 labels: 14 app: project04 15 spec: 16 containers: 
 17 - name: project04 18 image: project04:1.0.0 19 imagePullPolicy: IfNotPresent 20 env: 21 - name: ignite.nodeName 22 valueFrom: 23 fieldRef: 24 fieldPath: metadata.name 25 ports: 26 - containerPort: 47100 # communication SPI port 27 - containerPort: 47500 # discovery SPI port 28 - containerPort: 49112 # default JMX port 29 - containerPort: 10800 # thin clients/JDBC driver port 30 - containerPort: 8080 # REST API 31 volumeMounts: 32 - mountPath: /ignite/data 33 name: ignite 34 resources: 35 limits: 36 cpu: \u0026#34;1\u0026#34; 37 memory: \u0026#34;500Mi\u0026#34; 38 volumes: 39 - name: ignite 40 persistentVolumeClaim: 41 claimName: ignite-pv-claim 42--- 43apiVersion: v1 44kind: PersistentVolume 45metadata: 46 name: ignite-pv-volume 47 labels: 48 type: local 49 app: project04 50spec: 51 storageClassName: manual 52 capacity: 53 storage: 5Gi 54 accessModes: 55 - ReadWriteMany 56 hostPath: 57 path: \u0026#34;/tmp/data\u0026#34; 58--- 59apiVersion: v1 60kind: PersistentVolumeClaim 61metadata: 62 name: ignite-pv-claim 63 labels: 64 app: project04 65spec: 66 storageClassName: manual 67 accessModes: 68 - ReadWriteMany 69 resources: 70 requests: 71 storage: 5Gi 72--- 73kind: Service 74apiVersion: v1 75metadata: 76 name: project04 77spec: 78 ports: 79 - port: 8080 80 targetPort: 8080 81 name: http 82 selector: 83 app: project04 84 type: LoadBalancer Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project 04 2 3Distributed Locking - Apache Ignite 4 5[https://gitorko.github.io/distributed-locking-apache-ignite/](https://gitorko.github.io/distributed-locking-apache-ignite/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 21.0.3 2024-04-16 LTS 14``` 15 16 17### Dev 18 19To run the code. 
20 21```bash 22./gradlew clean build 23./gradlew bootRun 24./gradlew bootJar 25``` 26 27To run many node instances 28 29```bash 30cd build/libs 31java -jar project04-1.0.0.jar --server.port=8081 --ignite.nodeName=node1 32java -jar project04-1.0.0.jar --server.port=8082 --ignite.nodeName=node2 33java -jar project04-1.0.0.jar --server.port=8083 --ignite.nodeName=node3 34 35``` 36 37JVM tuning parameters 38 39```bash 40java -jar -Xms1024m -Xmx2048m -XX:MaxDirectMemorySize=256m -XX:+DisableExplicitGC -XX:+UseG1GC -XX:+ScavengeBeforeFullGC -XX:+AlwaysPreTouch project04-1.0.0.jar --server.port=8080 --ignite.nodeName=node0 41``` 42 43 44Create a service account 45 46```bash 47kubectl apply -f - \u0026lt;\u0026lt;EOF 48apiVersion: v1 49kind: Secret 50metadata: 51 name: default-secret 52 annotations: 53 kubernetes.io/service-account.name: default 54type: kubernetes.io/service-account-token 55EOF 56``` 57 58Edit the service account and update the last 2 lines 59 60```bash 61kubectl edit serviceaccounts default 62 63apiVersion: v1 64kind: ServiceAccount 65metadata: 66 creationTimestamp: \u0026#34;XXXX-XX-XXTXX:XX:XXZ\u0026#34; 67 name: default 68 namespace: default 69 resourceVersion: \u0026#34;XXXX\u0026#34; 70 uid: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX 71secrets: 72 - name: default-secret 73``` 74Check if token is created 75 76```bash 77kubectl describe secret default 78``` 79 80Provide admin role to the service account 81 82```bash 83kubectl apply -f - \u0026lt;\u0026lt;EOF 84apiVersion: rbac.authorization.k8s.io/v1 85kind: ClusterRoleBinding 86metadata: 87 name: admin-user 88roleRef: 89 apiGroup: rbac.authorization.k8s.io 90 kind: ClusterRole 91 name: cluster-admin 92subjects: 93- kind: ServiceAccount 94 name: default 95 namespace: default 96EOF 97``` 98 99Build the docker image 100 101```bash 102docker build -f docker/Dockerfile --force-rm -t project04:1.0.0 . 
103``` 104 105Deploy to k8s 106 107```bash 108mkdir /tmp/data 109kubectl apply -f docker/deployment.yaml 110kubectl get pods -w 111 112kubectl config set-context --current --namespace=default 113kubectl get deployments 114kubectl scale statefulset project04 --replicas=3 115kubectl scale deployment project04 --replicas=1 116``` 117 118Clean up 119 120```bash 121kubectl delete -f docker/deployment.yaml 122``` References https://ignite.apache.org/\n","link":"https://gitorko.github.io/post/distributed-locking-apache-ignite/","section":"post","tags":["ignite","distributed-lock","k8s","kubernetes"],"title":"Distributed Locking - Apache Ignite"},{"body":"","link":"https://gitorko.github.io/tags/distributed-lock/","section":"tags","tags":null,"title":"Distributed-Lock"},{"body":"","link":"https://gitorko.github.io/tags/ignite/","section":"tags","tags":null,"title":"Ignite"},{"body":"","link":"https://gitorko.github.io/tags/k8s/","section":"tags","tags":null,"title":"K8s"},{"body":"Spring boot application with apache ignite integration\nGithub: https://github.com/gitorko/project91\nApache Ignite Apache Ignite is a distributed database. 
Data in Ignite is stored in-memory and/or on-disk, and is either partitioned or replicated across a cluster of multiple nodes.\nFeatures\nKey-value Store In-Memory Cache In-Memory Data Grid - Data stored on multiple nodes In-Memory Database - Disk based persistence ACID transactions - Only at the key-value level SQL queries - Does not support foreign key constraints Stream processing Distributed compute - Uses co-located data Messaging queue Multi-tier storage - Storage in postgres, mysql Distributed SQL (Sql queries to fetch data stored on different nodes) Apache Ignite Setup\nEmbedded server Embedded client Cluster setup Apache Ignite automatically synchronizes the changes with the database in an asynchronous, background task If an entity is not cached it is read from the database and put to the cache for future use.\nTerminology Cache Mode Partition Cache (Default) - Data is partitioned and each partition is stored on different node (Fast write) Replicated Cache - Data will be replicated across all cache (Slow write) Affinity Function - Determines the partition data belongs to Backup - Backup of partition is stored with primary and secondary owner Atomicity Mode Atomic (Default) - Operations performed atomically, one at a time. Transactions are not supported. Transactional - ACID-compliant transactions (Slow) Eviction - LRU When persistence is off some data is evicted \u0026amp; lost When persistence is on then page is evicted from memory but data is present on disk Redis vs Apache Ignite While Redis stores data in memory, Ignite relies on memory and disk to store data. Hence, Ignite can store much larger amounts of data than Redis\nEhCache vs Apache Ignite Ehcache is more focused on local caching and does not provide built-in support for distributed caching or computing. EhCache primarily intended for single-node caching scenarios.\nScalability and Distributed Computing - Easily scaled across multiple nodes in a cluster. 
It allows for data and computation to be distributed across the nodes, providing high availability and fault tolerance. Data Partitioning and Replication: - Offers advanced data partitioning and replication capabilities. It automatically partitions the data across multiple nodes in a cluster, ensuring that each node only holds a portion of the overall data set. This enables parallel processing and efficient data retrieval. In addition, Ignite allows for configurable data replication, ensuring data redundancy and fault tolerance. Computational Capabilities - Supports running distributed computations across the cluster, allowing for parallel processing and improved performance. It provides APIs for distributed SQL queries, machine learning, and real-time streaming analytics. Integration with Other Technologies: Integrates seamlessly with various other technologies and frameworks. It provides connectors and integrations for popular data processing frameworks like Apache Spark, Apache Hadoop, and Apache Cassandra. It also offers support for various persistence stores, such as JDBC, NoSQL databases, and Hadoop Distributed File System (HDFS). Transaction Support: Supports distributed transactions, allowing multiple nodes in a cluster to participate in a single transaction. It ensures consistency and isolation across the distributed cache. Management and Monitoring Capabilities: Offers a web-based management console for monitoring the cluster status, metrics, and performance. 
Code 1package com.demo.project91.config; 2 3import java.sql.Types; 4import java.util.ArrayList; 5import java.util.Collections; 6import java.util.LinkedHashMap; 7import java.util.List; 8import javax.cache.configuration.Factory; 9import javax.sql.DataSource; 10 11import com.demo.project91.pojo.Customer; 12import com.demo.project91.pojo.Employee; 13import org.apache.ignite.Ignite; 14import org.apache.ignite.Ignition; 15import org.apache.ignite.cache.CacheAtomicityMode; 16import org.apache.ignite.cache.CacheMode; 17import org.apache.ignite.cache.QueryEntity; 18import org.apache.ignite.cache.store.jdbc.CacheJdbcPojoStoreFactory; 19import org.apache.ignite.cache.store.jdbc.JdbcType; 20import org.apache.ignite.cache.store.jdbc.JdbcTypeField; 21import org.apache.ignite.cache.store.jdbc.dialect.BasicJdbcDialect; 22import org.apache.ignite.cluster.ClusterState; 23import org.apache.ignite.configuration.CacheConfiguration; 24import org.apache.ignite.configuration.DataPageEvictionMode; 25import org.apache.ignite.configuration.DataRegionConfiguration; 26import org.apache.ignite.configuration.DataStorageConfiguration; 27import org.apache.ignite.configuration.IgniteConfiguration; 28import org.apache.ignite.kubernetes.configuration.KubernetesConnectionConfiguration; 29import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi; 30import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; 31import org.apache.ignite.spi.discovery.tcp.ipfinder.kubernetes.TcpDiscoveryKubernetesIpFinder; 32import org.apache.ignite.spi.discovery.tcp.ipfinder.multicast.TcpDiscoveryMulticastIpFinder; 33import org.apache.ignite.springdata.repository.config.EnableIgniteRepositories; 34import org.springframework.beans.factory.annotation.Value; 35import org.springframework.context.annotation.Bean; 36import org.springframework.context.annotation.Configuration; 37import org.springframework.jdbc.datasource.DriverManagerDataSource; 38 39@Configuration 
40@EnableIgniteRepositories(\u0026#34;com.demo.project91.repository\u0026#34;) 41public class IgniteConfig { 42 43 /** 44 * Override the node name for each instance at start using properties 45 */ 46 @Value(\u0026#34;${ignite.nodeName:node0}\u0026#34;) 47 private String nodeName; 48 49 @Value(\u0026#34;${ignite.kubernetes.enabled:false}\u0026#34;) 50 private Boolean k8sEnabled; 51 52 private String k8sApiServer = \u0026#34;https://kubernetes.docker.internal:6443\u0026#34;; 53 private String k8sServiceName = \u0026#34;project04\u0026#34;; 54 private String k8sNameSpace = \u0026#34;default\u0026#34;; 55 56 @Bean(name = \u0026#34;igniteInstance\u0026#34;) 57 public Ignite igniteInstance() { 58 Ignite ignite = Ignition.start(igniteConfiguration()); 59 60 /** 61 * If data is persisted then have to explicitly set the cluster state to active. 62 */ 63 ignite.cluster().state(ClusterState.ACTIVE); 64 return ignite; 65 } 66 67 @Bean(name = \u0026#34;igniteConfiguration\u0026#34;) 68 public IgniteConfiguration igniteConfiguration() { 69 IgniteConfiguration cfg = new IgniteConfiguration(); 70 /** 71 * Uniquely identify node in a cluster use consistent Id. 
72 */ 73 cfg.setConsistentId(nodeName); 74 75 cfg.setIgniteInstanceName(\u0026#34;my-ignite-instance\u0026#34;); 76 cfg.setPeerClassLoadingEnabled(true); 77 cfg.setLocalHost(\u0026#34;127.0.0.1\u0026#34;); 78 cfg.setMetricsLogFrequency(0); 79 80 cfg.setCommunicationSpi(tcpCommunicationSpi()); 81 if (k8sEnabled) { 82 cfg.setDiscoverySpi(tcpDiscoverySpiKubernetes()); 83 } else { 84 cfg.setDiscoverySpi(tcpDiscovery()); 85 } 86 cfg.setDataStorageConfiguration(dataStorageConfiguration()); 87 cfg.setCacheConfiguration(cacheConfiguration()); 88 return cfg; 89 } 90 91 @Bean(name = \u0026#34;cacheConfiguration\u0026#34;) 92 public CacheConfiguration[] cacheConfiguration() { 93 List\u0026lt;CacheConfiguration\u0026gt; cacheConfigurations = new ArrayList\u0026lt;\u0026gt;(); 94 cacheConfigurations.add(getAccountCacheConfig()); 95 cacheConfigurations.add(getCustomerCacheConfig()); 96 cacheConfigurations.add(getCountryCacheConfig()); 97 cacheConfigurations.add(getEmployeeCacheConfig()); 98 return cacheConfigurations.toArray(new CacheConfiguration[cacheConfigurations.size()]); 99 } 100 101 private CacheConfiguration getAccountCacheConfig() { 102 /** 103 * Ignite table to store Account data 104 */ 105 CacheConfiguration cacheConfig = new CacheConfiguration(); 106 cacheConfig.setAtomicityMode(CacheAtomicityMode.ATOMIC); 107 cacheConfig.setCacheMode(CacheMode.REPLICATED); 108 cacheConfig.setName(\u0026#34;account-cache\u0026#34;); 109 cacheConfig.setStatisticsEnabled(true); 110 QueryEntity qe = new QueryEntity(); 111 qe.setTableName(\u0026#34;ACCOUNTS\u0026#34;); 112 qe.setKeyFieldName(\u0026#34;ID\u0026#34;); 113 qe.setKeyType(\u0026#34;java.lang.Long\u0026#34;); 114 qe.setValueType(\u0026#34;java.lang.Object\u0026#34;); 115 LinkedHashMap map = new LinkedHashMap(); 116 map.put(\u0026#34;ID\u0026#34;, \u0026#34;java.lang.Long\u0026#34;); 117 map.put(\u0026#34;amount\u0026#34;, \u0026#34;java.lang.Double\u0026#34;); 118 map.put(\u0026#34;updateDate\u0026#34;, 
\u0026#34;java.util.Date\u0026#34;); 119 qe.setFields(map); 120 cacheConfig.setQueryEntities(List.of(qe)); 121 return cacheConfig; 122 } 123 124 private CacheConfiguration\u0026lt;Long, Customer\u0026gt; getCustomerCacheConfig() { 125 /** 126 * Customer cache to store Customer.class objects 127 */ 128 CacheConfiguration\u0026lt;Long, Customer\u0026gt; cacheConfig = new CacheConfiguration(\u0026#34;customer-cache\u0026#34;); 129 cacheConfig.setIndexedTypes(Long.class, Customer.class); 130 return cacheConfig; 131 } 132 133 private CacheConfiguration getCountryCacheConfig() { 134 /** 135 * Country cache to store key value pair 136 */ 137 CacheConfiguration cacheConfig = new CacheConfiguration(\u0026#34;country-cache\u0026#34;); 138 /** 139 * This cache will be stored in non-persistent data region 140 */ 141 cacheConfig.setDataRegionName(\u0026#34;my-data-region\u0026#34;); 142 return cacheConfig; 143 } 144 145 private CacheConfiguration\u0026lt;Long, Employee\u0026gt; getEmployeeCacheConfig() { 146 /** 147 * Employee cache to store Employee.class objects 148 */ 149 CacheConfiguration\u0026lt;Long, Employee\u0026gt; cacheConfig = new CacheConfiguration(\u0026#34;employee-cache\u0026#34;); 150 cacheConfig.setIndexedTypes(Long.class, Employee.class); 151 cacheConfig.setCacheStoreFactory(cacheJdbcPojoStoreFactory()); 152 /** 153 * If value not present in cache then fetch from db and store in cache 154 */ 155 cacheConfig.setReadThrough(true); 156 /** 157 * If value present in cache then write to db. 
 158 */ 159 cacheConfig.setWriteThrough(true); 160 /** 161 * Will wait for some time to update db asynchronously 162 */ 163 cacheConfig.setWriteBehindEnabled(true); 164 /** 165 * Min 2 entries in cache before written to db 166 */ 167 cacheConfig.setWriteBehindFlushSize(2); 168 /** 169 * Write to DB at interval delay of 2 seconds 170 */ 171 cacheConfig.setWriteBehindFlushFrequency(2000); 172 cacheConfig.setIndexedTypes(Long.class, Employee.class); 173 return cacheConfig; 174 } 175 176 private CacheJdbcPojoStoreFactory cacheJdbcPojoStoreFactory() { 177 CacheJdbcPojoStoreFactory\u0026lt;Long, Employee\u0026gt; factory = new CacheJdbcPojoStoreFactory\u0026lt;\u0026gt;(); 178 factory.setDialect(new BasicJdbcDialect()); 179 180 //factory.setDataSourceFactory(getDataSourceFactory()); 181 factory.setDataSourceFactory(new DbFactory()); 182 JdbcType employeeType = getEmployeeJdbcType(); 183 factory.setTypes(employeeType); 184 return factory; 185 } 186 187 /** 188 * Nodes discover each other over this port 189 */ 190 private TcpDiscoverySpi tcpDiscovery() { 191 TcpDiscoverySpi tcpDiscoverySpi = new TcpDiscoverySpi(); 192 TcpDiscoveryMulticastIpFinder ipFinder = new TcpDiscoveryMulticastIpFinder(); 193 ipFinder.setAddresses(Collections.singletonList(\u0026#34;127.0.0.1:47500..47509\u0026#34;)); 194 tcpDiscoverySpi.setIpFinder(ipFinder); 195 tcpDiscoverySpi.setLocalPort(47500); 196 // Changing local port range. This is an optional action. 
197 tcpDiscoverySpi.setLocalPortRange(9); 198 //tcpDiscoverySpi.setLocalAddress(\u0026#34;localhost\u0026#34;); 199 return tcpDiscoverySpi; 200 } 201 202 private TcpDiscoverySpi tcpDiscoverySpiKubernetes() { 203 TcpDiscoverySpi spi = new TcpDiscoverySpi(); 204 KubernetesConnectionConfiguration kcfg = new KubernetesConnectionConfiguration(); 205 kcfg.setNamespace(k8sNameSpace); 206 kcfg.setMasterUrl(k8sApiServer); 207 TcpDiscoveryKubernetesIpFinder ipFinder = new TcpDiscoveryKubernetesIpFinder(kcfg); 208 ipFinder.setServiceName(k8sServiceName); 209 spi.setIpFinder(ipFinder); 210 return spi; 211 } 212 213 /** 214 * Nodes communicate with each other over this port 215 */ 216 private TcpCommunicationSpi tcpCommunicationSpi() { 217 TcpCommunicationSpi communicationSpi = new TcpCommunicationSpi(); 218 communicationSpi.setMessageQueueLimit(1024); 219 communicationSpi.setLocalAddress(\u0026#34;localhost\u0026#34;); 220 communicationSpi.setLocalPort(48100); 221 communicationSpi.setSlowClientQueueLimit(1000); 222 return communicationSpi; 223 } 224 225 private DataStorageConfiguration dataStorageConfiguration() { 226 DataStorageConfiguration dsc = new DataStorageConfiguration(); 227 DataRegionConfiguration defaultRegionCfg = new DataRegionConfiguration(); 228 DataRegionConfiguration regionCfg = new DataRegionConfiguration(); 229 230 defaultRegionCfg.setName(\u0026#34;default-data-region\u0026#34;); 231 defaultRegionCfg.setInitialSize(10 * 1024 * 1024); //10MB 232 defaultRegionCfg.setMaxSize(50 * 1024 * 1024); //50MB 233 234 /** 235 * The cache will be persisted on default region 236 */ 237 defaultRegionCfg.setPersistenceEnabled(true); 238 239 /** 240 * Eviction mode 241 */ 242 defaultRegionCfg.setPageEvictionMode(DataPageEvictionMode.RANDOM_LRU); 243 244 regionCfg.setName(\u0026#34;my-data-region\u0026#34;); 245 regionCfg.setInitialSize(10 * 1024 * 1024); //10MB 246 regionCfg.setMaxSize(50 * 1024 * 1024); //50MB 247 /** 248 * Cache in this region will not be persisted 249 */ 
250 regionCfg.setPersistenceEnabled(false); 251 252 dsc.setDefaultDataRegionConfiguration(defaultRegionCfg); 253 dsc.setDataRegionConfigurations(regionCfg); 254 255 return dsc; 256 } 257 258 /** 259 * Since it serializes you cant pass variables. Use the DbFactory.class 260 */ 261 private Factory\u0026lt;DataSource\u0026gt; getDataSourceFactory() { 262 return () -\u0026gt; { 263 DriverManagerDataSource driverManagerDataSource = new DriverManagerDataSource(); 264 driverManagerDataSource.setDriverClassName(\u0026#34;org.postgresql.Driver\u0026#34;); 265 driverManagerDataSource.setUrl(\u0026#34;jdbc:postgresql://localhost:5432/test-db\u0026#34;); 266 driverManagerDataSource.setUsername(\u0026#34;test\u0026#34;); 267 driverManagerDataSource.setPassword(\u0026#34;test@123\u0026#34;); 268 return driverManagerDataSource; 269 }; 270 } 271 272 private JdbcType getEmployeeJdbcType() { 273 JdbcType employeeType = new JdbcType(); 274 employeeType.setCacheName(\u0026#34;employee-cache\u0026#34;); 275 employeeType.setDatabaseTable(\u0026#34;employee\u0026#34;); 276 employeeType.setKeyType(Long.class); 277 employeeType.setKeyFields(new JdbcTypeField(Types.BIGINT, \u0026#34;id\u0026#34;, Long.class, \u0026#34;id\u0026#34;)); 278 employeeType.setValueFields( 279 new JdbcTypeField(Types.BIGINT, \u0026#34;id\u0026#34;, Long.class, \u0026#34;id\u0026#34;), 280 new JdbcTypeField(Types.VARCHAR, \u0026#34;name\u0026#34;, String.class, \u0026#34;name\u0026#34;), 281 new JdbcTypeField(Types.VARCHAR, \u0026#34;email\u0026#34;, String.class, \u0026#34;email\u0026#34;) 282 ); 283 employeeType.setValueType(Employee.class); 284 return employeeType; 285 } 286 287} 1package com.demo.project91.config; 2 3import org.apache.ignite.cache.spring.SpringCacheManager; 4import org.apache.ignite.configuration.IgniteConfiguration; 5import org.springframework.cache.annotation.EnableCaching; 6import org.springframework.context.annotation.Bean; 7import org.springframework.context.annotation.Configuration; 8 
9@Configuration 10@EnableCaching 11public class SpringCacheConfig { 12 @Bean 13 public SpringCacheManager cacheManager() { 14 SpringCacheManager cacheManager = new SpringCacheManager(); 15 cacheManager.setConfiguration(getSpringCacheIgniteConfiguration()); 16 return cacheManager; 17 } 18 19 private IgniteConfiguration getSpringCacheIgniteConfiguration() { 20 return new IgniteConfiguration() 21 .setIgniteInstanceName(\u0026#34;spring-ignite-instance\u0026#34;) 22 .setMetricsLogFrequency(0); 23 } 24} 1package com.demo.project91.config; 2 3import java.io.Serializable; 4import javax.cache.configuration.Factory; 5import javax.sql.DataSource; 6 7import org.springframework.beans.factory.annotation.Value; 8import org.springframework.context.annotation.Configuration; 9import org.springframework.jdbc.datasource.DriverManagerDataSource; 10 11@Configuration 12public class DbFactory implements Serializable, Factory\u0026lt;DataSource\u0026gt; { 13 14 private static final long serialVersionUID = -1L; 15 16 @Value(\u0026#34;${spring.datasource.url}\u0026#34;) 17 private String jdbcUrl; 18 19 @Value(\u0026#34;${spring.datasource.username}\u0026#34;) 20 private String username; 21 22 @Value(\u0026#34;${spring.datasource.password}\u0026#34;) 23 private String password; 24 25 @Override 26 public DataSource create() { 27 DriverManagerDataSource driverManagerDataSource = new DriverManagerDataSource(); 28 driverManagerDataSource.setDriverClassName(\u0026#34;org.postgresql.Driver\u0026#34;); 29 driverManagerDataSource.setUrl(jdbcUrl); 30 driverManagerDataSource.setUsername(username); 31 driverManagerDataSource.setPassword(password); 32 return driverManagerDataSource; 33 } 34} 1package com.demo.project91.service; 2 3import java.util.Optional; 4 5import com.demo.project91.pojo.Customer; 6import com.demo.project91.repository.CustomerRepository; 7import lombok.RequiredArgsConstructor; 8import lombok.extern.slf4j.Slf4j; 9import org.springframework.stereotype.Service; 10 11/** 12 * Interact 
with Ignite via IgniteRepository 13 */ 14@Service 15@RequiredArgsConstructor 16@Slf4j 17public class CustomerService { 18 19 final CustomerRepository customerRepository; 20 21 public Customer saveCustomer(Customer customer) { 22 return customerRepository.save(customer.getId(), customer); 23 } 24 25 public Iterable\u0026lt;Customer\u0026gt; getAllCustomers() { 26 return customerRepository.findAll(); 27 } 28 29 public Optional\u0026lt;Customer\u0026gt; getCustomerById(Long id) { 30 return customerRepository.findById(id); 31 } 32 33} 1package com.demo.project91.service; 2 3import java.util.ArrayList; 4import java.util.List; 5import javax.cache.Cache; 6 7import com.demo.project91.pojo.Employee; 8import jakarta.annotation.PostConstruct; 9import lombok.RequiredArgsConstructor; 10import org.apache.ignite.Ignite; 11import org.apache.ignite.IgniteCache; 12import org.springframework.stereotype.Service; 13 14/** 15 * Interact with Ignite as key-value store (persistent store) 16 */ 17@Service 18@RequiredArgsConstructor 19public class EmployeeService { 20 21 final Ignite ignite; 22 IgniteCache\u0026lt;Long, Employee\u0026gt; cache; 23 24 @PostConstruct 25 public void postInit() { 26 cache = ignite.cache(\u0026#34;employee-cache\u0026#34;); 27 } 28 29 public Employee save(Employee employee) { 30 cache.put(employee.getId(), employee); 31 return employee; 32 } 33 34 public Iterable\u0026lt;Employee\u0026gt; getAllEmployees() { 35 List\u0026lt;Employee\u0026gt; employees = new ArrayList\u0026lt;\u0026gt;(); 36 for (Cache.Entry\u0026lt;Long, Employee\u0026gt; e : cache) { 37 employees.add(e.getValue()); 38 } 39 return employees; 40 } 41} 1package com.demo.project91.service; 2 3import java.sql.ResultSet; 4import java.sql.SQLException; 5import java.util.List; 6 7import com.demo.project91.pojo.Company; 8import lombok.RequiredArgsConstructor; 9import lombok.extern.slf4j.Slf4j; 10import org.springframework.cache.annotation.Cacheable; 11import org.springframework.jdbc.core.JdbcTemplate; 
12import org.springframework.jdbc.core.RowMapper; 13import org.springframework.stereotype.Service; 14 15/** 16 * Interact with Ignite via Spring @Cacheable abstraction 17 */ 18@Service 19@Slf4j 20@RequiredArgsConstructor 21public class CompanyService { 22 23 final JdbcTemplate jdbcTemplate; 24 25 @Cacheable(value = \u0026#34;company-cache\u0026#34;) 26 public List\u0026lt;Company\u0026gt; getAllCompanies() { 27 log.info(\u0026#34;Fetching company from database!\u0026#34;); 28 return jdbcTemplate.query(\u0026#34;select * from company\u0026#34;, new CompanyRowMapper()); 29 } 30 31 public void insertMockData() { 32 log.info(\u0026#34;Starting to insert mock data!\u0026#34;); 33 for (int i = 0; i \u0026lt; 10000; i++) { 34 jdbcTemplate.update(\u0026#34;INSERT INTO company (id, name) \u0026#34; + \u0026#34;VALUES (?, ?)\u0026#34;, 35 i, \u0026#34;company_\u0026#34; + i); 36 } 37 log.info(\u0026#34;Completed insert of mock data!\u0026#34;); 38 } 39 40 class CompanyRowMapper implements RowMapper\u0026lt;Company\u0026gt; { 41 @Override 42 public Company mapRow(ResultSet rs, int rowNum) throws SQLException { 43 return new Company(rs.getLong(\u0026#34;id\u0026#34;), rs.getString(\u0026#34;name\u0026#34;)); 44 } 45 } 46} Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project 91 2 3Spring Boot - Apache Ignite 4 5[https://gitorko.github.io/spring-boot-apache-ignite/](https://gitorko.github.io/spring-boot-apache-ignite/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 21.0.3 2024-04-16 LTS 14``` 15 16### Postgres DB 17 18``` 19docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:9.6.10 20docker ps 21docker exec -it pg-container psql -U postgres -W postgres 22CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 23CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 24grant all PRIVILEGES ON DATABASE 
\u0026#34;test-db\u0026#34; to test; 25 26docker stop pg-container 27docker start pg-container 28``` 29 30### Dev 31 32To run the code. 33 34```bash 35export JAVA_TOOL_OPTIONS=\u0026#34;--add-opens=jdk.management/com.sun.management.internal=ALL-UNNAMED \\ 36--add-opens=java.base/jdk.internal.misc=ALL-UNNAMED \\ 37--add-opens=java.base/sun.nio.ch=ALL-UNNAMED \\ 38--add-opens=java.management/com.sun.jmx.mbeanserver=ALL-UNNAMED \\ 39--add-opens=jdk.internal.jvmstat/sun.jvmstat.monitor=ALL-UNNAMED \\ 40--add-opens=java.base/sun.reflect.generics.reflectiveObjects=ALL-UNNAMED \\ 41--add-opens=java.base/java.io=ALL-UNNAMED \\ 42--add-opens=java.base/java.lang=ALL-UNNAMED \\ 43--add-opens=java.base/java.nio=ALL-UNNAMED \\ 44--add-opens=java.base/java.time=ALL-UNNAMED \\ 45--add-opens=java.base/java.util=ALL-UNNAMED \\ 46--add-opens=java.base/java.util.concurrent=ALL-UNNAMED \\ 47--add-opens=java.base/java.util.concurrent.locks=ALL-UNNAMED \\ 48--add-opens=java.base/java.lang.invoke=ALL-UNNAMED\u0026#34; 49 50./gradlew clean build 51./gradlew bootRun 52./gradlew bootJar 53``` 54 55To run many node instances 56 57```bash 58cd build/libs 59java -jar project91-1.0.0.jar --server.port=8081 --ignite.nodeName=node1 60java -jar project91-1.0.0.jar --server.port=8082 --ignite.nodeName=node2 61java -jar project91-1.0.0.jar --server.port=8083 --ignite.nodeName=node3 62 63``` 64 65JVM tuning parameters 66 67```bash 68java -jar -Xms1024m -Xmx2048m -XX:MaxDirectMemorySize=256m -XX:+DisableExplicitGC -XX:+UseG1GC -XX:+ScavengeBeforeFullGC -XX:+AlwaysPreTouch project91-1.0.0.jar --server.port=8080 --ignite.nodeName=node0 69``` Issues Standard CrudRepository save(entity), save(entities), delete(entity) operations aren't supported. We have to use the save(key, value), save(Map\u0026lt;ID, Entity\u0026gt; values), deleteAll(Iterable ids) methods.\n@EnableIgniteRepositories declared on IgniteConfig: Can not perform the operation because the cluster is inactive. 
Note, that the cluster is considered inactive by default if Ignite Persistent Store is used to let all the nodes join the cluster. To activate the cluster call Ignite.cluster().state(ClusterState.ACTIVE).\n1Possible too long JVM pause: 418467 milliseconds. 2Blocked system-critical thread has been detected. This can lead to cluster-wide undefined behaviour GC pauses decreases overall performance. if pause will be longer than failureDetectionTimeout node will be disconnected from cluster. https://apacheignite.readme.io/docs/jvm-and-system-tuning\n1 Failed to add node to topology because it has the same hash code for partitioned affinity as one of existing nodes Instance cant have same node id.\nWhen 3 nodes are running you will see the cluster\n1Topology snapshot [ver=3, locNode=2e963fb3, servers=3, clients=0, state=ACTIVE, CPUs=16, offheap=38.0GB, heap=24.0GB] 1Failed to validate cache configuration. Cache store factory is not serializable. CacheJdbcPojoStoreFactory will be serialized hence needs to implement Serializable\nReferences https://ignite.apache.org/\n","link":"https://gitorko.github.io/post/spring-apache-ignite/","section":"post","tags":["ignite","caching","postgres"],"title":"Spring - Apache 
Ignite"},{"body":"","link":"https://gitorko.github.io/categories/hibernate/","section":"categories","tags":null,"title":"Hibernate"},{"body":"","link":"https://gitorko.github.io/tags/jointable/","section":"tags","tags":null,"title":"Jointable"},{"body":"","link":"https://gitorko.github.io/tags/locking/","section":"tags","tags":null,"title":"Locking"},{"body":"","link":"https://gitorko.github.io/categories/locking/","section":"categories","tags":null,"title":"Locking"},{"body":"","link":"https://gitorko.github.io/tags/manytoone/","section":"tags","tags":null,"title":"Manytoone"},{"body":"","link":"https://gitorko.github.io/tags/onetomany/","section":"tags","tags":null,"title":"Onetomany"},{"body":"","link":"https://gitorko.github.io/tags/onetoone/","section":"tags","tags":null,"title":"Onetoone"},{"body":"","link":"https://gitorko.github.io/tags/optimistic-locking/","section":"tags","tags":null,"title":"Optimistic-Locking"},{"body":"","link":"https://gitorko.github.io/tags/pessimistic-locking/","section":"tags","tags":null,"title":"Pessimistic-Locking"},{"body":"Introduction to Spring JPA with examples. Create domain classes that map to database \u0026amp; write JPA queries to fetch the data.\nGithub: https://github.com/gitorko/project82\nSpring Data JPA Spring Data JPA provides an abstraction layer over the Java Persistence API (JPA) Spring Data JPA offers a repository abstraction that allows developers to interact with their data models using a repository pattern, which includes out-of-the-box implementations for common CRUD (Create, Read, Update, Delete) operations\nFeatures\nRepository Abstraction: Provides a high-level abstraction over the data access layer, allowing developers to define repositories with minimal code. Automatic Query Generation: Generates queries based on method names defined in repository interfaces. Pagination and Sorting: Supports pagination and sorting out of the box. 
Auditing: Supports auditing of entity changes (e.g., tracking created/modified dates and users). Custom Query Methods: Allows custom JPQL (Java Persistence Query Language) and SQL queries. Integration with Spring: Seamlessly integrates with the Spring Framework, including transaction management and dependency injection. Annotation Description @ManyToOne Most natural way to map a foreign key relation. Default to FETCH.EAGER @OneToMany Parent entity to map collection of child entities. If bi-directional then child entity has @ManyToOne. If child entities can grow then will affect performance. Use only when child entities are few @OneToOne Creates a foreign key in parent table that refers to the primary key of child table. @MapsId Single key acts as primary key \u0026amp; foreign key, with single key you can now fetch data from both table with same key. @ManyToMany Two parents on one child, avoid doing CascadeType.ALL, dont do orphan removal. mappedBy Present in parent, Tells hibernate that the child side is in charge of handling bi-directional association. mappedBy \u0026amp; @JoinColumn cant be present in the same class. For bi-directional associations where the child is in charge of handling association, you must still write setter methods in parent to sync both sides. Otherwise, you risk very subtle state propagation issues.\nSpring JPA determines of an object is new based on @Version annotation, you can also accomplish the same by implementing Persistable interface.\nSpring JPA uses dirty checking mechanism to determine if something has changed and then auto saves the data to database. 
Dirty checking marks all columns as updated by default; if you want to avoid it use the annotation @DynamicUpdate on the class.
LockModeType.PESSIMISTIC_FORCE_INCREMENT - Rows are locked and cannot be read, modified or deleted by other transactions. it forces an increment of the version attribute Lock the row being read to avoid the same row from being updated by 2 different transactions\nselect * from table FOR SHARE - This clause locks the selected rows for read, other threads can read but cant modify. select * from table FOR UPDATE - This clause locks the selected rows for update. This prevents other transactions from reading/modifying these rows until the current transaction is completed (committed or rolled back) select * from table FOR UPDATE SKIP LOCKED clause - This clause tells the database to skip rows that are already locked by another transaction. Instead of waiting for the lock to be released\nOptimistic locking\nLockModeType.OPTIMISTIC - Checks the version attribute of the entity before committing the transaction to ensure no other transaction has modified the entity. LockModeType.OPTIMISTIC_FORCE_INCREMENT - Forces a version increment of the entity, even if the entity has not been modified during the update. Transaction Isolation\nTransaction isolation levels in JPA define the degree to which the operations within a transaction are isolated from the operations in other concurrent transactions JPA, typically using the underlying database and JDBC settings\nIsolation.READ_UNCOMMITTED Read Uncommitted - The lowest level of isolation. Transactions can read uncommitted changes made by other transactions. Isolation.READ_COMMITTED Read Committed - Transactions can only read committed changes made by other transactions. Isolation.REPEATABLE_READ Repeatable Read - If a transaction reads a row, it will get the same data if it reads the row again within the same transaction. Isolation.SERIALIZABLE Serializable - The highest level of isolation. Transactions are completely isolated from one another. Data Consistency\nDirty reads: read UNCOMMITED data from another transaction. 
Non-repeatable reads: read COMMITTED data from an UPDATE query from another transaction. Phantom reads: read COMMITTED data from an INSERT or DELETE query from another transaction. Dirty Read\nNAME AGE Bob 35 TRANSACTION T1 TRANSACTION T2 select age from table where name = 'Bob'; (35) update table set age = 40 where name = 'Bob'; select age from table where name = 'Bob'; (40) commit; Non-Repeatable Read\nNAME AGE Bob 35 TRANSACTION T1 TRANSACTION T2 select age from table where name = 'Bob'; (35) update table set age = 40 where name = 'Bob'; commit; select age from table where name = 'Bob'; (40) Phantom Read\nNAME AGE Bob 35 TRANSACTION T1 TRANSACTION T2 select count(*) from table where age = 35; (1) insert into table values ('jack', 35); commit; select count(*) from table where age = 35; (2) Behaviour of Isolation Levels\nIsolation Level Dirty Non-Repeatable Reads Phantom Reads Read Uncommitted Yes Yes Yes Read Committed No Yes Yes Read Committed No No Yes Serializable No No No 1spring: 2 jpa: 3 properties: 4 hibernate: 5 connection: 6 isolation: 2 1@Transactional(isolation = Isolation.SERIALIZABLE) 1SHOW default_transaction_isolation; Transaction Propagation\nWhen one transaciton functions calls another in the same class boundary then the parent transaction level is applied. You need to move the function to a different public class if you want its transaction to be enforced. When nested calls happen on transaction boundary then the transaction is suspended.\n@Transactional(readOnly = true) - transaction is readonly and now updates can happen. @Transactional(propagation = Propagation.REQUIRES_NEW) - creates a new transaction. @Transactional(propagation = Propagation.REQUIRED) - default, spring will create a new transaction if not present. @Transactional(propagation = Propagation.MANDATORY) - will throw exception if transaction doesn't exist. 
@Transactional(propagation = Propagation.SUPPORTS) - if existing transaction present then it will be used, else operation will happen without any transaction. @Transactional(propagation = Propagation.NOT_SUPPORTED) - operation will have with no transaction. @Transactional(propagation = Propagation.NOT_SUPPORTED) - will throw an exception if transaction present. You can define which exception call the rollback and which don't.\n1@Transactional(noRollbackFor = {CustomException.class}, rollbackFor = {RuntimeException.class}) To track transactions\n1logging: 2 level: 3 root: info 4 org.springframework.orm.jpa.JpaTransactionManager: DEBUG Spring keeps the transaction open till the controller returns the response. This is because it thinks that the object may be accessed later in the HTML (web mvc templates). We don't use this, so we will set the below property to false that way transaction is closed after @Transactional function ends.\n1spring: 2 jpa: 3 open-in-view: false By setting auto-commit to false spring won't commit immediately but will commit when the transaction ends.\n1spring: 2 datasource: 3 hikari: 4 auto-commit: false You can also use TransactionTemplate to control transactions if you dont want to use @Transactional and want more control. Try to the transaction boundary small. 
External calls need to be done outside the transaction context.\n1transactionTemplate.executeWithoutResult() 2transactionTemplate.execute() Code 1package com.demo.project82; 2 3import static org.junit.jupiter.api.Assertions.assertEquals; 4import static org.junit.jupiter.api.Assertions.assertNotNull; 5import static org.junit.jupiter.api.Assertions.assertNull; 6 7import java.math.BigDecimal; 8import java.nio.charset.StandardCharsets; 9import java.time.LocalDate; 10import java.util.Date; 11import java.util.HashMap; 12import java.util.List; 13import java.util.Map; 14import java.util.Optional; 15import java.util.concurrent.CountDownLatch; 16import java.util.concurrent.ExecutorService; 17import java.util.concurrent.Executors; 18import java.util.concurrent.TimeUnit; 19 20import com.demo.project82._00_constraints.Student00; 21import com.demo.project82._00_constraints.repo.Student00Repository; 22import com.demo.project82._01_one2one_unidirectional.Contact01; 23import com.demo.project82._01_one2one_unidirectional.Student01; 24import com.demo.project82._01_one2one_unidirectional.repo.Contact01Repository; 25import com.demo.project82._01_one2one_unidirectional.repo.Student01Repository; 26import com.demo.project82._02_one2one_unidirectional_mapsid.Contact02; 27import com.demo.project82._02_one2one_unidirectional_mapsid.Student02; 28import com.demo.project82._02_one2one_unidirectional_mapsid.repo.Contact02Repository; 29import com.demo.project82._02_one2one_unidirectional_mapsid.repo.Student02Repository; 30import com.demo.project82._03_one2one_unidirectional_no_cascade.Contact03; 31import com.demo.project82._03_one2one_unidirectional_no_cascade.Student03; 32import com.demo.project82._03_one2one_unidirectional_no_cascade.repo.Contact03Repository; 33import com.demo.project82._03_one2one_unidirectional_no_cascade.repo.Student03Repository; 34import com.demo.project82._04_one2one_bidirectional.Contact04; 35import com.demo.project82._04_one2one_bidirectional.Student04; 36import 
com.demo.project82._04_one2one_bidirectional.repo.Contact04Repository; 37import com.demo.project82._04_one2one_bidirectional.repo.Student04Repository; 38import com.demo.project82._05_one2one_bidirectional_nplus1_fixed.Student05; 39import com.demo.project82._05_one2one_bidirectional_nplus1_fixed.repo.Student05Repository; 40import com.demo.project82._06_one2many_3tables_unidirectional_wrong.Student06; 41import com.demo.project82._06_one2many_3tables_unidirectional_wrong.repo.Student06Repository; 42import com.demo.project82._07_one2many_unidirectional.Course07; 43import com.demo.project82._07_one2many_unidirectional.Student07; 44import com.demo.project82._07_one2many_unidirectional.repo.Student07Repository; 45import com.demo.project82._08_one2many_unidirectional_nplus1_fixed.Student08; 46import com.demo.project82._08_one2many_unidirectional_nplus1_fixed.repo.Student08Repository; 47import com.demo.project82._09_one2many_mappedby_wrong.Student09; 48import com.demo.project82._09_one2many_mappedby_wrong.repo.Student09Repository; 49import com.demo.project82._10_one2many_many2one_bidirectional_mappedby.Course10; 50import com.demo.project82._10_one2many_many2one_bidirectional_mappedby.Student10; 51import com.demo.project82._10_one2many_many2one_bidirectional_mappedby.repo.Course10Repository; 52import com.demo.project82._10_one2many_many2one_bidirectional_mappedby.repo.Student10Repository; 53import com.demo.project82._11_many2one_unidirectional.Course11; 54import com.demo.project82._11_many2one_unidirectional.Student11; 55import com.demo.project82._11_many2one_unidirectional.repo.Course11Repository; 56import com.demo.project82._11_many2one_unidirectional.repo.Student11Repository; 57import com.demo.project82._12_one2many_elementcollection_unidirectional.Phone12; 58import com.demo.project82._12_one2many_elementcollection_unidirectional.Student12; 59import com.demo.project82._12_one2many_elementcollection_unidirectional.repo.Student12Repository; 60import 
com.demo.project82._13_many2many_bidirectional.Student13; 61import com.demo.project82._13_many2many_bidirectional.Teacher13; 62import com.demo.project82._13_many2many_bidirectional.repo.Student13Repository; 63import com.demo.project82._13_many2many_bidirectional.repo.Teacher13Repository; 64import com.demo.project82._14_many2many_unidirectional.Student14; 65import com.demo.project82._14_many2many_unidirectional.Teacher14; 66import com.demo.project82._14_many2many_unidirectional.repo.Student14Repository; 67import com.demo.project82._14_many2many_unidirectional.repo.Teacher14Repository; 68import com.demo.project82._15_many2many_jointable_bidirectional.Student15; 69import com.demo.project82._15_many2many_jointable_bidirectional.Teacher15; 70import com.demo.project82._15_many2many_jointable_bidirectional.repo.Student15Repository; 71import com.demo.project82._15_many2many_jointable_bidirectional.repo.Teacher15Repository; 72import com.demo.project82._16_one2many_jointable_unidirectional.Course16; 73import com.demo.project82._16_one2many_jointable_unidirectional.Student16; 74import com.demo.project82._16_one2many_jointable_unidirectional.repo.Student16Repository; 75import com.demo.project82._17_one2many_jointable_mapkey.Course17; 76import com.demo.project82._17_one2many_jointable_mapkey.Student17; 77import com.demo.project82._17_one2many_jointable_mapkey.repo.Student17Repository; 78import com.demo.project82._18_one2one_jointable_unidirectional.Contact18; 79import com.demo.project82._18_one2one_jointable_unidirectional.Student18; 80import com.demo.project82._18_one2one_jointable_unidirectional.repo.Student18Repository; 81import com.demo.project82._19_one2many_unidirectional.Course19; 82import com.demo.project82._19_one2many_unidirectional.Student19; 83import com.demo.project82._19_one2many_unidirectional.repo.Course19Repository; 84import com.demo.project82._19_one2many_unidirectional.repo.Student19Repository; 85import com.demo.project82._20_enum_lob.Student20; 86import 
com.demo.project82._20_enum_lob.StudentType; 87import com.demo.project82._20_enum_lob.repo.Student20Repository; 88import com.demo.project82._21_audit.Student21; 89import com.demo.project82._21_audit.repo.Student21Repository; 90import com.demo.project82._22_unique_constraints.Student22; 91import com.demo.project82._22_unique_constraints.repo.Student22Repository; 92import com.demo.project82._23_nartual_id.Student23; 93import com.demo.project82._23_nartual_id.repo.Student23Repository; 94import com.demo.project82._24_composite_key.Student24; 95import com.demo.project82._24_composite_key.Student24Identity; 96import com.demo.project82._24_composite_key.repo.Student24Repository; 97import com.demo.project82._25_map.Student25; 98import com.demo.project82._25_map.repo.Student25Repository; 99import com.demo.project82._26_embeddable.Address; 100import com.demo.project82._26_embeddable.Student26; 101import com.demo.project82._26_embeddable.Teacher26; 102import com.demo.project82._26_embeddable.repo.Student26Repository; 103import com.demo.project82._26_embeddable.repo.Teacher26Repository; 104import com.demo.project82._27_inheritance.Student27; 105import com.demo.project82._27_inheritance.repo.Student27Repository; 106import com.demo.project82._28_projections.Student28; 107import com.demo.project82._28_projections.Student28DTO; 108import com.demo.project82._28_projections.Student28Pojo; 109import com.demo.project82._28_projections.Student28View; 110import com.demo.project82._28_projections.repo.Student28Repository; 111import com.demo.project82._29_pessimistic_locking.repo.Student29Repository; 112import com.demo.project82._29_pessimistic_locking.service.Student29Service; 113import com.demo.project82._30_optimistic_locking.Student30; 114import com.demo.project82._30_optimistic_locking.repo.Student30Repository; 115import com.demo.project82._30_optimistic_locking.service.Student30Service; 116import com.demo.project82._31_java_records.Student31Record; 117import 
com.demo.project82._31_java_records.repo.Student31Converter; 118import com.demo.project82._31_java_records.service.Student31Service; 119import com.demo.project82._32_transaction.Student32; 120import com.demo.project82._32_transaction.repo.Student32Repository; 121import com.demo.project82._32_transaction.service.Student32Service; 122import com.demo.project82._33_query_by_example.Student33; 123import com.demo.project82._33_query_by_example.repo.Student33Repository; 124import com.demo.project82._33_query_by_example.service.Student33Service; 125import com.demo.project82._34_proxy.Course34; 126import com.demo.project82._34_proxy.Student34; 127import com.demo.project82._34_proxy.repo.Course34Repository; 128import com.demo.project82._34_proxy.repo.Student34Repository; 129import com.demo.project82._35_json.Student35; 130import com.demo.project82._35_json.repo.Student35Repository; 131import jakarta.persistence.EntityManager; 132import jakarta.persistence.PersistenceContext; 133import org.junit.jupiter.api.Test; 134import org.springframework.beans.factory.annotation.Autowired; 135import org.springframework.boot.test.autoconfigure.jdbc.AutoConfigureTestDatabase; 136import org.springframework.boot.test.autoconfigure.orm.jpa.DataJpaTest; 137import org.springframework.context.annotation.Import; 138import org.springframework.data.domain.Page; 139import org.springframework.data.domain.PageRequest; 140import org.springframework.data.domain.Sort; 141import org.springframework.orm.ObjectOptimisticLockingFailureException; 142import org.springframework.transaction.support.TransactionTemplate; 143import org.testcontainers.junit.jupiter.Testcontainers; 144 145@Testcontainers 146@AutoConfigureTestDatabase(replace = AutoConfigureTestDatabase.Replace.NONE) 147@DataJpaTest 148@Import({Student29Service.class, Student30Service.class, Student31Service.class, Student32Service.class, 149 Student33Service.class, Student31Converter.class}) 150public class StudentTest extends BaseTest { 151 152 
final ExecutorService threadPool = Executors.newFixedThreadPool(2); 153 154 @Autowired 155 Contact01Repository contact01Repository; 156 157 @Autowired 158 Contact02Repository contact02Repository; 159 160 @Autowired 161 Contact03Repository contact03Repository; 162 163 @Autowired 164 Contact04Repository contact04Repository; 165 166 @Autowired 167 Student00Repository student00Repository; 168 169 @Autowired 170 Student01Repository student01Repository; 171 172 @Autowired 173 Student02Repository student02Repository; 174 175 @Autowired 176 Student03Repository student03Repository; 177 178 @Autowired 179 Student04Repository student04Repository; 180 181 @Autowired 182 Student05Repository student05Repository; 183 184 @Autowired 185 Student06Repository student06Repository; 186 187 @Autowired 188 Student07Repository student07Repository; 189 190 @Autowired 191 Student08Repository student08Repository; 192 193 @Autowired 194 Student09Repository student09Repository; 195 196 @Autowired 197 Student10Repository student10Repository; 198 199 @Autowired 200 Student11Repository student11Repository; 201 202 @Autowired 203 Student12Repository student12Repository; 204 205 @Autowired 206 Student13Repository student13Repository; 207 208 @Autowired 209 Student14Repository student14Repository; 210 211 @Autowired 212 Student15Repository student15Repository; 213 214 @Autowired 215 Student16Repository student16Repository; 216 217 @Autowired 218 Student17Repository student17Repository; 219 220 @Autowired 221 Student18Repository student18Repository; 222 223 @Autowired 224 Student19Repository student19Repository; 225 226 @Autowired 227 Student20Repository student20Repository; 228 229 @Autowired 230 Student21Repository student21Repository; 231 232 @Autowired 233 Student22Repository student22Repository; 234 235 @Autowired 236 Student23Repository student23Repository; 237 238 @Autowired 239 Student24Repository student24Repository; 240 241 @Autowired 242 Student25Repository student25Repository; 243 244 
@Autowired 245 Student26Repository student26Repository; 246 247 @Autowired 248 Student27Repository student27Repository; 249 250 @Autowired 251 Student28Repository student28Repository; 252 253 @Autowired 254 Student29Repository student29Repository; 255 256 @Autowired 257 Student30Repository student30Repository; 258 259 @Autowired 260 Student32Repository student32Repository; 261 262 @Autowired 263 Student33Repository student33Repository; 264 265 @Autowired 266 Student34Repository student34Repository; 267 268 @Autowired 269 Student35Repository student35Repository; 270 271 @Autowired 272 Teacher26Repository teacher26Repository; 273 274 @Autowired 275 Course10Repository course10Repository; 276 277 @Autowired 278 Course11Repository course11Repository; 279 280 @Autowired 281 Course19Repository course19Repository; 282 283 @Autowired 284 Course34Repository course34Repository; 285 286 @Autowired 287 Teacher13Repository teacher13Repository; 288 289 @Autowired 290 Teacher14Repository teacher14Repository; 291 292 @Autowired 293 Teacher15Repository teacher15Repository; 294 295 @Autowired 296 TransactionTemplate transactionTemplate; 297 298 @Autowired 299 Student31Service student31Service; 300 301 @Autowired 302 Student33Service student33Service; 303 304 @PersistenceContext 305 EntityManager entityManager; 306 307 @Test 308 public void test_00_constraints_entityManager() { 309 transactionTemplate.executeWithoutResult(status -\u0026gt; { 310 String photo = \u0026#34;photo\u0026#34;; 311 Student00 student = Student00.builder() 312 .studentName(\u0026#34;Jack\u0026#34;) 313 .userName(\u0026#34;jack\u0026#34;) 314 .dob(new Date()) 315 .registered_on(LocalDate.now()) 316 .age(40) 317 .email(\u0026#34;email@email.com\u0026#34;) 318 .gpaScore(BigDecimal.valueOf(9.9)) 319 .notes(\u0026#34;something about student\u0026#34;) 320 .blob(photo.getBytes(StandardCharsets.UTF_8)) 321 .build(); 322 entityManager.persist(student); 323 entityManager.flush(); 324 entityManager.clear(); 325 
System.out.println(\u0026#34;Student: \u0026#34; + student); 326 }); 327 } 328 329 @Test 330 public void test_00_constraints() { 331 String photo = \u0026#34;photo\u0026#34;; 332 Student00 student = Student00.builder() 333 .studentName(\u0026#34;Jack\u0026#34;) 334 .userName(\u0026#34;jack\u0026#34;) 335 .dob(new Date()) 336 .registered_on(LocalDate.now()) 337 .age(40) 338 .email(\u0026#34;email@email.com\u0026#34;) 339 .gpaScore(BigDecimal.valueOf(9.9)) 340 .notes(\u0026#34;something about student\u0026#34;) 341 .blob(photo.getBytes(StandardCharsets.UTF_8)) 342 .build(); 343 Student00 savedStudent = student00Repository.save(student); 344 System.out.println(\u0026#34;Student: \u0026#34; + student); 345 assertNotNull(savedStudent.getId()); 346 assertNotNull(savedStudent.getDob()); 347 assertNotNull(savedStudent.getRegistered_on()); 348 } 349 350 @Test 351 public void test_01_one2one_unidirectional() { 352 Contact01 contact = Contact01.builder().address(\u0026#34;Bangalore\u0026#34;).build(); 353 Student01 student = Student01.builder().studentName(\u0026#34;Jack\u0026#34;).contact(contact).build(); 354 Student01 savedStudent = student01Repository.save(student); 355 assertNotNull(savedStudent.getId()); 356 assertNotNull(savedStudent.getContact().getId()); 357 } 358 359 @Test 360 public void test_02_one2one_unidirectional_mapsid() { 361 Contact02 contact = Contact02.builder().address(\u0026#34;Bangalore\u0026#34;).build(); 362 Student02 student = Student02.builder().studentName(\u0026#34;Jack\u0026#34;).contact(contact).build(); 363 Student02 savedStudent = student02Repository.save(student); 364 //No cascade but contact still saved. 
365 assertNotNull(savedStudent.getId()); 366 assertNotNull(savedStudent.getContact().getId()); 367 } 368 369 @Test 370 public void test_03_one2one_unidirectional_no_cascade() { 371 Contact03 contact = Contact03.builder().address(\u0026#34;Bangalore\u0026#34;).build(); 372 Student03 student = Student03.builder().studentName(\u0026#34;Jack\u0026#34;).contact(contact).build(); 373 Student03 savedStudent = student03Repository.save(student); 374 //no cascade so contact is not saved. 375 assertNotNull(savedStudent.getId()); 376 assertNull(savedStudent.getContact().getId()); 377 } 378 379 @Test 380 public void test_04_one2one_bidirectional() { 381 Contact04 contact1 = Contact04.builder().address(\u0026#34;Bangalore\u0026#34;).build(); 382 Student04 student1 = Student04.builder().studentName(\u0026#34;Jack\u0026#34;).contact(contact1).build(); 383 384 Student04 savedStudent = student04Repository.save(student1); 385 assertNotNull(savedStudent.getContact().getId()); 386 assertNotNull(savedStudent.getId()); 387 Optional\u0026lt;Student04\u0026gt; studentOptional = student04Repository.findById(savedStudent.getId()); 388 assertNotNull(studentOptional.get().getContact().getId()); 389 390 Student04 student2 = Student04.builder().studentName(\u0026#34;Jack\u0026#34;).build(); 391 Contact04 contact2 = Contact04.builder().address(\u0026#34;Bangalore\u0026#34;).student(student2).build(); 392 393 Contact04 savedContact = contact04Repository.save(contact2); 394 assertNotNull(savedContact.getStudent().getId()); 395 assertNotNull(savedContact.getId()); 396 Optional\u0026lt;Contact04\u0026gt; contactOptional = contact04Repository.findById(savedContact.getId()); 397 assertNotNull(contactOptional.get().getStudent().getId()); 398 } 399 400 @Test 401 public void test_04_one2one_bidirectional_nplus1() { 402 //creates the N+1 problem 403 Iterable\u0026lt;Student04\u0026gt; studentList = student04Repository.findAll(); 404 //Even though student contact is not required it is loaded as the relation 
is @OneToOne 405 studentList.forEach(e -\u0026gt; { 406 assertNotNull(e.getId()); 407 }); 408 } 409 410 @Test 411 public void test_05_one2one_bidirectional_nplus1_fixed() { 412 //N+1 problem solved 413 Iterable\u0026lt;Student05\u0026gt; studentList = student05Repository.findAll(); 414 studentList.forEach(e -\u0026gt; { 415 assertNotNull(e.getId()); 416 }); 417 } 418 419 @Test 420 public void test_06_one2many_3tables_unidirectional_wrong() { 421 //Wrong way to map relation don\u0026#39;t use this 422 Iterable\u0026lt;Student06\u0026gt; studentList = student06Repository.findAll(); 423 studentList.forEach(e -\u0026gt; { 424 assertNotNull(e.getId()); 425 assertEquals(3, e.getCourses().size()); 426 }); 427 } 428 429 @Test 430 public void test_07_one2many_unidirectional() { 431 Iterable\u0026lt;Student07\u0026gt; studentList = student07Repository.findAll(); 432 studentList.forEach(e -\u0026gt; { 433 assertNotNull(e.getId()); 434 assertEquals(3, e.getCourses().size()); 435 }); 436 } 437 438 @Test 439 public void test_07_one2many_unidirectional_save() { 440 Course07 course = Course07.builder() 441 .courseName(\u0026#34;chemistry\u0026#34;) 442 .build(); 443 Student07 student = Student07.builder() 444 .studentName(\u0026#34;Jack\u0026#34;) 445 .courses(List.of(course)) 446 .build(); 447 Student07 savedStudent = student07Repository.save(student); 448 assertNotNull(savedStudent.getId()); 449 assertEquals(1, savedStudent.getCourses().size()); 450 } 451 452 @Test 453 public void test_08_one2many_unidirectional_nplus1_fixed() { 454 Iterable\u0026lt;Student08\u0026gt; studentList = student08Repository.findAll(); 455 studentList.forEach(e -\u0026gt; { 456 assertNotNull(e.getId()); 457 assertEquals(3, e.getCourses().size()); 458 }); 459 } 460 461 @Test 462 public void test_09_one2many_mappedby_wrong() { 463 //Wrong way to map relation don\u0026#39;t use this 464 Iterable\u0026lt;Student09\u0026gt; studentList = student09Repository.findAll(); 465 studentList.forEach(e -\u0026gt; { 
466 assertNotNull(e.getId()); 467 assertEquals(3, e.getCourses().size()); 468 }); 469 } 470 471 @Test 472 public void test_10_one2many_many2one_bidirectional_mappedby_1() { 473 Iterable\u0026lt;Student10\u0026gt; student10List = student10Repository.findAll(); 474 student10List.forEach(e -\u0026gt; { 475 assertNotNull(e.getId()); 476 System.out.println(\u0026#34;Student Name: \u0026#34; + e.getStudentName()); 477 assertEquals(3, e.getCourses().size()); 478 }); 479 } 480 481 @Test 482 public void test_10_one2many_many2one_bidirectional_mappedby_2() { 483 List\u0026lt;Course10\u0026gt; history = course10Repository.findAllByCourseName(\u0026#34;history\u0026#34;); 484 assertEquals(2, history.size()); 485 history.forEach(e -\u0026gt; { 486 System.out.println(\u0026#34;Student Name: \u0026#34; + e.getStudent().getStudentName()); 487 assertNotNull(e.getStudent().getId()); 488 }); 489 } 490 491 @Test 492 public void test_10_one2many_many2one_bidirectional_mappedby_3() { 493 Course10 historyCourse = Course10.builder() 494 .courseName(\u0026#34;history\u0026#34;) 495 .build(); 496 Course10 physicsCourse = Course10.builder() 497 .courseName(\u0026#34;physics\u0026#34;) 498 .build(); 499 Student10 student = Student10.builder() 500 .studentName(\u0026#34;Jack\u0026#34;) 501 .build(); 502 student.addCourse(historyCourse); 503 student.addCourse(physicsCourse); 504 Student10 savedStudent = student10Repository.save(student); 505 506 List\u0026lt;Course10\u0026gt; courses = course10Repository.findAllByStudent(savedStudent); 507 assertEquals(2, courses.size()); 508 } 509 510 @Test 511 public void test_10_one2many_many2one_bidirectional_mappedby_4() { 512 Course10 historyCourse = Course10.builder() 513 .courseName(\u0026#34;history\u0026#34;) 514 .build(); 515 Course10 physicsCourse = Course10.builder() 516 .courseName(\u0026#34;physics\u0026#34;) 517 .build(); 518 Student10 student = Student10.builder() 519 .studentName(\u0026#34;Jack\u0026#34;) 520 .build(); 521 
student.addCourse(historyCourse); 522 student.addCourse(physicsCourse); 523 Course10 savedHistoryCourse = course10Repository.save(historyCourse); 524 Course10 savedPhysicsCourse = course10Repository.save(physicsCourse); 525 Student10 savedStudent = savedHistoryCourse.getStudent(); 526 assertNotNull(savedStudent.getId()); 527 528 List\u0026lt;Course10\u0026gt; courses = course10Repository.findAllByStudent(savedStudent); 529 assertEquals(2, courses.size()); 530 } 531 532 @Test 533 public void test_11_many2one_unidirectional() { 534 //Get all the students \u0026amp; for each student get all the courses 535 Iterable\u0026lt;Student11\u0026gt; studentList = student11Repository.findAll(); 536 studentList.forEach(e -\u0026gt; { 537 assertNotNull(e.getId()); 538 System.out.println(\u0026#34;Student Name: \u0026#34; + e.getStudentName()); 539 List\u0026lt;Course11\u0026gt; courses = course11Repository.findAllByStudent(e); 540 courses.forEach(c -\u0026gt; { 541 System.out.println(\u0026#34;Course: \u0026#34; + c.getCourseName()); 542 }); 543 }); 544 } 545 546 @Test 547 public void test_11_many2one_unidirectional_save() { 548 Student11 student = Student11.builder() 549 .studentName(\u0026#34;Jack\u0026#34;) 550 .build(); 551 Course11 historyCourse = Course11.builder() 552 .courseName(\u0026#34;history\u0026#34;) 553 .student(student) 554 .build(); 555 Course11 physicsCourse = Course11.builder() 556 .courseName(\u0026#34;physics\u0026#34;) 557 .student(student) 558 .build(); 559 Course11 savedHistory = course11Repository.save(historyCourse); 560 Course11 savedPhysics = course11Repository.save(physicsCourse); 561 Student11 savedStudent = student11Repository.findById(savedHistory.getStudent().getId()).orElseGet(null); 562 563 List\u0026lt;Course11\u0026gt; courses = course11Repository.findAllByStudent(savedStudent); 564 assertEquals(2, courses.size()); 565 } 566 567 @Test 568 public void test_12_one2many_elementcollection_unidirectional() { 569 Phone12 phone1 = Phone12.builder() 
570 .phone(\u0026#34;999-999-9999\u0026#34;) 571 .build(); 572 Student12 student = Student12.builder() 573 .studentName(\u0026#34;Jack\u0026#34;) 574 .phones(List.of(phone1)) 575 .build(); 576 Student12 savedStudent12 = student12Repository.save(student); 577 assertNotNull(savedStudent12.getId()); 578 } 579 580 @Test 581 public void test_12_one2many_elementcollection_unidirectional_find() { 582 Iterable\u0026lt;Student12\u0026gt; listOfStudents = student12Repository.findAll(); 583 listOfStudents.forEach(e -\u0026gt; { 584 assertEquals(2, e.getPhones().size()); 585 }); 586 } 587 588 @Test 589 public void test_13_many2many_bidirectional() { 590 Teacher13 teacher1 = Teacher13.builder() 591 .teacherName(\u0026#34;Mr. Adam\u0026#34;) 592 .build(); 593 Teacher13 teacher2 = Teacher13.builder() 594 .teacherName(\u0026#34;Mr. Smith\u0026#34;) 595 .build(); 596 Student13 student1 = Student13.builder() 597 .studentName(\u0026#34;Jack\u0026#34;) 598 .build(); 599 Student13 student2 = Student13.builder() 600 .studentName(\u0026#34;David\u0026#34;) 601 .build(); 602 603 Student13 savedStudent1 = student13Repository.save(student1); 604 Student13 savedStudent2 = student13Repository.save(student2); 605 606 Teacher13 savedTeacher1 = teacher13Repository.save(teacher1); 607 Teacher13 savedTeacher2 = teacher13Repository.save(teacher2); 608 609 savedTeacher1.addStudent(student1); 610 teacher13Repository.save(savedTeacher1); 611 612 savedStudent2.addTeacher(savedTeacher2); 613 student13Repository.save(savedStudent2); 614 } 615 616 @Test 617 public void test_14_many2many_unidirectional_save() { 618 Teacher14 teacher1 = Teacher14.builder() 619 .teacherName(\u0026#34;Mr. Adam\u0026#34;) 620 .build(); 621 Teacher14 teacher2 = Teacher14.builder() 622 .teacherName(\u0026#34;Mr. 
Smith\u0026#34;) 623 .build(); 624 Student14 student1 = Student14.builder() 625 .studentName(\u0026#34;Jack\u0026#34;) 626 .build(); 627 Student14 student2 = Student14.builder() 628 .studentName(\u0026#34;David\u0026#34;) 629 .build(); 630 631 Student14 savedStudent1 = student14Repository.save(student1); 632 Student14 savedStudent2 = student14Repository.save(student2); 633 634 Teacher14 savedTeacher1 = teacher14Repository.save(teacher1); 635 Teacher14 savedTeacher2 = teacher14Repository.save(teacher2); 636 637 savedStudent1.addTeacher(savedTeacher1); 638 savedStudent2.addTeacher(savedTeacher2); 639 student14Repository.save(savedStudent1); 640 student14Repository.save(savedStudent2); 641 } 642 643 @Test 644 public void test_14_many2many_unidirectional_delete() { 645 646 Student14 savedStudent1 = student14Repository.findById(100l).orElseGet(null); 647 Student14 savedStudent2 = student14Repository.findById(101l).orElse(null); 648 649 Teacher14 savedTeacher1 = teacher14Repository.findById(200l).orElse(null); 650 Teacher14 savedTeacher2 = teacher14Repository.findById(201l).orElse(null); 651 652 savedStudent1.removeTeacher(savedTeacher1); 653 student14Repository.save(savedStudent1); 654 655 savedStudent1 = student14Repository.findById(100l).orElseGet(null); 656 assertEquals(1, savedStudent1.getTeachers().size()); 657 658 savedStudent2 = student14Repository.findById(101l).orElseGet(null); 659 assertEquals(2, savedStudent2.getTeachers().size()); 660 } 661 662 @Test 663 public void test_15_many2many_jointable_bidirectional() { 664 Teacher15 teacher1 = Teacher15.builder() 665 .teacherName(\u0026#34;Mr. Adam\u0026#34;) 666 .build(); 667 Teacher15 teacher2 = Teacher15.builder() 668 .teacherName(\u0026#34;Mr. 
Smith\u0026#34;) 669 .build(); 670 Student15 student1 = Student15.builder() 671 .studentName(\u0026#34;Jack\u0026#34;) 672 .build(); 673 Student15 student2 = Student15.builder() 674 .studentName(\u0026#34;David\u0026#34;) 675 .build(); 676 677 Student15 savedStudent1 = student15Repository.save(student1); 678 Student15 savedStudent2 = student15Repository.save(student2); 679 680 Teacher15 savedTeacher1 = teacher15Repository.save(teacher1); 681 Teacher15 savedTeacher2 = teacher15Repository.save(teacher2); 682 683 savedTeacher1.addStudent(student1); 684 teacher15Repository.save(savedTeacher1); 685 686 savedStudent2.addTeacher(savedTeacher2); 687 student15Repository.save(savedStudent2); 688 } 689 690 @Test 691 public void test_16_one2many_jointable_unidirectional() { 692 Course16 physicsCourse = Course16.builder() 693 .courseName(\u0026#34;physics\u0026#34;) 694 .build(); 695 Course16 chemistryCourse = Course16.builder() 696 .courseName(\u0026#34;chemistry\u0026#34;) 697 .build(); 698 Student16 student = Student16.builder() 699 .studentName(\u0026#34;Jack\u0026#34;) 700 .courses(List.of(physicsCourse, chemistryCourse)) 701 .build(); 702 Student16 savedStudent = student16Repository.save(student); 703 assertNotNull(savedStudent.getId()); 704 } 705 706 @Test 707 public void test_17_one2many_jointable_mapkey() { 708 Course17 physicsCourse = Course17.builder() 709 .courseName(\u0026#34;physics\u0026#34;) 710 .build(); 711 Course17 chemistryCourse = Course17.builder() 712 .courseName(\u0026#34;chemistry\u0026#34;) 713 .build(); 714 Student17 student = Student17.builder() 715 .studentName(\u0026#34;Jack\u0026#34;) 716 .build(); 717 Map\u0026lt;String, Course17\u0026gt; courseMap = new HashMap\u0026lt;\u0026gt;(); 718 courseMap.put(\u0026#34;physics\u0026#34;, physicsCourse); 719 courseMap.put(\u0026#34;chemistry\u0026#34;, chemistryCourse); 720 student.setCourseMap(courseMap); 721 Student17 savedStudent = student17Repository.save(student); 722 
assertNotNull(savedStudent.getId()); 723 } 724 725 @Test 726 public void test_18_one2one_jointable_unidirectional() { 727 Contact18 contact = Contact18.builder() 728 .address(\u0026#34;Bangalore\u0026#34;) 729 .build(); 730 Student18 student = Student18.builder() 731 .studentName(\u0026#34;Jack\u0026#34;) 732 .contact(contact) 733 .build(); 734 Student18 savedStudent = student18Repository.save(student); 735 assertNotNull(savedStudent.getId()); 736 assertNotNull(savedStudent.getContact().getId()); 737 } 738 739 @Test 740 public void test_19_one2many_unidirectional_save() { 741 Student19 student = Student19.builder() 742 .studentName(\u0026#34;Jack\u0026#34;) 743 .build(); 744 Course19 course = Course19.builder() 745 .courseName(\u0026#34;physics\u0026#34;) 746 .student(student) 747 .build(); 748 Course19 savedCourse = course19Repository.save(course); 749 assertNotNull(savedCourse.getId()); 750 assertNotNull(savedCourse.getStudent().getId()); 751 } 752 753 @Test 754 public void test_19_one2many_unidirectional_find() { 755 Iterable\u0026lt;Student19\u0026gt; students = student19Repository.findAll(); 756 students.forEach(e -\u0026gt; { 757 System.out.println(\u0026#34;Student: \u0026#34; + e); 758 List\u0026lt;Course19\u0026gt; courses = course19Repository.findAllByStudent(e); 759 assertEquals(3, courses.size()); 760 courses.forEach(c -\u0026gt; { 761 System.out.println(\u0026#34;Student: \u0026#34; + e + \u0026#34;, Course: \u0026#34; + c); 762 assertNotNull(c.getId()); 763 }); 764 }); 765 } 766 767 @Test 768 public void test_20_enum_lob() { 769 Student20 student = Student20.builder() 770 .studentName(\u0026#34;Jack\u0026#34;) 771 .studentType(StudentType.FULL_TIME) 772 .build(); 773 Student20 savedStudent = student20Repository.save(student); 774 assertNotNull(savedStudent.getId()); 775 assertEquals(StudentType.FULL_TIME, savedStudent.getStudentType()); 776 } 777 778 @Test 779 public void test_21_audit() { 780 Student21 student = Student21.builder() 781 
.studentName(\u0026#34;Jack\u0026#34;) 782 .build(); 783 Student21 savedStudent = student21Repository.save(student); 784 assertNotNull(savedStudent.getId()); 785 assertNotNull(savedStudent.getCreatedAt()); 786 assertNotNull(savedStudent.getUpdatedAt()); 787 } 788 789 @Test 790 public void test_22_unique_constraints() { 791 Student22 student = Student22.builder() 792 .studentName(\u0026#34;Jack\u0026#34;) 793 .userName(\u0026#34;user01\u0026#34;) 794 .email(\u0026#34;email@email.com\u0026#34;) 795 .build(); 796 Student22 savedStudent = student22Repository.save(student); 797 assertNotNull(savedStudent.getId()); 798 } 799 800 @Test 801 public void test_23_nartual_id() { 802 Student23 student = Student23.builder() 803 .studentName(\u0026#34;Jack\u0026#34;) 804 .email(\u0026#34;email@email.com\u0026#34;) 805 .build(); 806 Student23 savedStudent = student23Repository.save(student); 807 assertNotNull(savedStudent.getId()); 808 } 809 810 @Test 811 public void test_24_composite_key() { 812 Student24 student = Student24.builder() 813 .student24Identity(Student24Identity.builder() 814 .registrationId(\u0026#34;R-568\u0026#34;) 815 .studentId(\u0026#34;S-457\u0026#34;) 816 .build()) 817 .studentName(\u0026#34;Jack\u0026#34;) 818 .build(); 819 Student24 savedStudent = student24Repository.save(student); 820 assertNotNull(savedStudent); 821 } 822 823 @Test 824 public void test_25_map() { 825 Map\u0026lt;String, Object\u0026gt; attributes = new HashMap\u0026lt;\u0026gt;(); 826 attributes.put(\u0026#34;address\u0026#34;, \u0026#34;123 Main Street\u0026#34;); 827 attributes.put(\u0026#34;zipcode\u0026#34;, 12345); 828 829 Student25 student = Student25.builder() 830 .studentName(\u0026#34;jack\u0026#34;) 831 .attributes(attributes) 832 .build(); 833 Student25 savedStudent25 = student25Repository.save(student); 834 assertNotNull(savedStudent25); 835 836 Student25 findStudent25 = student25Repository.findById(savedStudent25.getId()).orElseThrow(); 837 assertEquals(12345, 
findStudent25.getAttributes().get(\u0026#34;zipcode\u0026#34;)); 838 } 839 840 @Test 841 public void test_26_embeddable() { 842 Student26 student = Student26.builder() 843 .studentName(\u0026#34;Jack\u0026#34;) 844 .addresses(Address.builder() 845 .addressLine(\u0026#34;5th street\u0026#34;) 846 .city(\u0026#34;Bangalore\u0026#34;) 847 .country(\u0026#34;India\u0026#34;) 848 .zipCode(\u0026#34;570021\u0026#34;) 849 .build()) 850 .build(); 851 Teacher26 teacher = Teacher26.builder() 852 .teacherName(\u0026#34;Mr. Adams\u0026#34;) 853 .addresses(Address.builder() 854 .addressLine(\u0026#34;9th street\u0026#34;) 855 .city(\u0026#34;Bangalore\u0026#34;) 856 .country(\u0026#34;India\u0026#34;) 857 .zipCode(\u0026#34;570015\u0026#34;) 858 .build()) 859 .build(); 860 Student26 savedStudent = student26Repository.save(student); 861 Teacher26 savedTeacher = teacher26Repository.save(teacher); 862 assertNotNull(savedStudent.getId()); 863 assertNotNull(savedTeacher.getId()); 864 } 865 866 @Test 867 public void test_27_inheritance() { 868 Student27 student = Student27.builder() 869 .studentName(\u0026#34;Jack\u0026#34;) 870 .build(); 871 Student27 savedStudent = student27Repository.save(student); 872 assertNotNull(savedStudent.getId()); 873 } 874 875 @Test 876 public void test_28_projections() { 877 Student28 student = Student28.builder() 878 .studentName(\u0026#34;Jack\u0026#34;) 879 .notes(\u0026#34;something about student\u0026#34;) 880 .monthlySalary(5000) 881 .build(); 882 Student28 savedStudent = student28Repository.save(student); 883 assertNotNull(savedStudent.getId()); 884 885 Student28View student27View = student28Repository.getStudent27View(savedStudent.getStudentName()); 886 assertEquals(60000, student27View.getAnnualSalary()); 887 888 Student28DTO student27Dto = student28Repository.getStudent27Dto(savedStudent.getStudentName()); 889 assertEquals(60000, student27Dto.annualSalary()); 890 891 Student28Pojo student27Pojo = 
student28Repository.getStudent27Pojo(savedStudent.getStudentName()); 892 assertEquals(60000, student27Pojo.getAnnualSalary()); 893 } 894 895 @Test 896 public void test_30_optimistic_locking_multi_thread() throws InterruptedException { 897 CountDownLatch latch = new CountDownLatch(2); 898 modifyStudent30(100l, latch); 899 modifyStudent30(100l, latch); 900 latch.await(5, TimeUnit.SECONDS); 901 Student30 student = student30Repository.findById(100l).orElseThrow(); 902 System.out.println(\u0026#34;Student: \u0026#34; + student); 903 } 904 905 private void modifyStudent30(Long id, CountDownLatch latch) { 906 threadPool.submit(() -\u0026gt; { 907 try { 908 Student30 student = student30Repository.findById(id).orElseThrow(); 909 student.setStudentName(student.getStudentName() + \u0026#34;_\u0026#34; + Thread.currentThread().getName()); 910 //Delay so that version is updated before next thread saves. 911 TimeUnit.SECONDS.sleep(5); 912 student30Repository.save(student); 913 } catch (ObjectOptimisticLockingFailureException | InterruptedException ex) { 914 ex.printStackTrace(); 915 assertEquals(ObjectOptimisticLockingFailureException.class, ex.getClass()); 916 } finally { 917 latch.countDown(); 918 } 919 }); 920 } 921 922 @Test 923 public void test_31_java_records() { 924 Student31Record student = new Student31Record(null, \u0026#34;jack\u0026#34;); 925 Student31Record savedStudent = student31Service.save(student); 926 assertNotNull(savedStudent.id()); 927 } 928 929 @Test 930 public void test_32_transaction() { 931 /** 932 * Tests do transaction rollback after the test is completed. 
933 */ 934 Student32 student = Student32.builder() 935 .studentName(\u0026#34;jack\u0026#34;) 936 .build(); 937 Student32 savedStudent = student32Repository.save(student); 938 assertNotNull(savedStudent.getId()); 939 } 940 941 @Test 942 public void test_32_dynamic_update() { 943 //Check the SQL update statement they will contain on the columns that changed as we used @DynamicUpdate 944 //Since tests don\u0026#39;t persist the data you will not see this in the test logs. 945 Student32 student = Student32.builder() 946 .studentName(\u0026#34;jack\u0026#34;) 947 .build(); 948 Student32 savedStudent = student32Repository.save(student); 949 assertNotNull(savedStudent.getId()); 950 } 951 952 @Test 953 public void test_33_queryByExample() { 954 Student33 exampleStudent = Student33.builder() 955 .studentName(\u0026#34;jack\u0026#34;) 956 .build(); 957 Student33 student = student33Service.findByOneExample1(exampleStudent); 958 assertNotNull(student.getId()); 959 960 student = student33Service.findByOneExample2(exampleStudent); 961 assertNotNull(student.getId()); 962 963 List\u0026lt;Student33\u0026gt; students = student33Service.findAllExample1(exampleStudent); 964 assertEquals(1, students.size()); 965 966 students = student33Service.findAllExample2(exampleStudent); 967 assertEquals(1, students.size()); 968 969 Page\u0026lt;Student33\u0026gt; page = student33Service.findAllByPage(PageRequest.of(0, 10, Sort.by(\u0026#34;studentName\u0026#34;))); 970 assertEquals(5, page.getTotalElements()); 971 972 page = student33Service.findAllByPageSort(PageRequest.of(0, 10)); 973 assertEquals(5, page.getTotalElements()); 974 975 students = student33Service.findByNameAndAgeIndex(\u0026#34;raj\u0026#34;, 34); 976 assertEquals(1, students.size()); 977 978 students = student33Service.findByNameAndAgeParam(\u0026#34;raj\u0026#34;, 34); 979 assertEquals(1, students.size()); 980 } 981 982 @Test 983 public void test_34_proxy() { 984 //by using getReferenceById we get a proxy object instead of 
the real object. 985 //with just the proxy object we are able to get the courses. 986 //drawback is that there can be constraint violation if the object doesn\u0026#39;t really exist as we never checked the db. 987 Student34 student = student34Repository.getReferenceById(100l); 988 List\u0026lt;Course34\u0026gt; courses = course34Repository.findAllByStudent(student); 989 assertEquals(3, courses.size()); 990 } 991 992 @Test 993 public void test_35_json() { 994 String payload = \u0026#34;{\\\u0026#34;city\\\u0026#34;: \\\u0026#34;bangalore\\\u0026#34;}\u0026#34;; 995 Student35 student = Student35.builder() 996 .studentName(\u0026#34;jack\u0026#34;) 997 .payload(payload) 998 .build(); 999 Student35 savedStudent35 = student35Repository.save(student); 1000 assertNotNull(savedStudent35); 1001 1002 Student35 findStudent35 = student35Repository.findById(savedStudent35.getId()).orElseThrow(); 1003 assertEquals(payload, findStudent35.getPayload()); 1004 1005 List\u0026lt;Student35\u0026gt; studentList = student35Repository.findByCity(\u0026#34;bangalore\u0026#34;); 1006 assertEquals(1, studentList.size()); 1007 } 1008 1009} Setup 1# Project82 2 3Spring Data JPA Essentials 4 5### Version 6 7Check version 8 9```bash 10$java --version 11openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 12``` 13 14### Postgres DB 15 16``` 17docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:14 18docker ps 19docker exec -it pg-container psql -U postgres -W postgres 20CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 21CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 22grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 23 24docker stop pg-container 25docker start pg-container 26``` 27 28There is a bug in spring data jpa where `jakarta.persistence.lock.timeout` is not working for postgres. 29Hence set the timeout at database level to 10 seconds. 
30 31```bash 32ALTER DATABASE \u0026#34;test-db\u0026#34; SET lock_timeout=10000; 33``` 34 35To look at isolation level 36 37```bash 38SHOW default_transaction_isolation; 39ALTER DATABASE \u0026#34;test-db\u0026#34; SET default_transaction_isolation = \u0026#39;read committed\u0026#39; 40``` 41 42### Dev 43 44To run the backend in dev mode. 45 46```bash 47./gradlew clean build 48./gradlew bootRun 49``` References https://vladmihalcea.com/blog/\nhttps://thorben-janssen.com/ultimate-guide-association-mappings-jpa-hibernate/\nhttps://docs.spring.io/spring-data/jpa/reference/repositories/projections.html\n","link":"https://gitorko.github.io/post/spring-data-jpa-basics/","section":"post","tags":["jdbc","onetomany","manytoone","onetoone","jointable","locking","transactional"],"title":"Spring Data JPA - Basics"},{"body":"When an app is deployed on more than one server how to you ensure that 2 threads dont modify the same record in db? If the operation was performed on a single JVM you could look at locking but since there are many jvm the locking has to be done at database level.\nGithub: https://github.com/gitorko/project82\nLocking \u0026amp; Transaction Isolation Locking ensures that the row is not concurrently updated by 2 different threads which might corrupt the data.\nProblem:\nThread A: Reads row with amount 100$ in Transaction T1 Thread B: Reads row with amount 100$ in Transaction T2 Thread A: Adds 10$, new amount is 110$ Thread B: Adds 10$, new amount is still 110$ instead of 120$.\nSolution 1 (Optimistic Locking):\nThread A: Reads row with amount 100$ in Transaction T1 Thread B: Reads row with amount 100$ in Transaction T2 Thread A: Adds 10$, new amount is 110$ Thread B: Adds 10$ and tries to save but sees that the record is not the same record that it read. So fails \u0026amp; does retry.\nSolution 2 (Pessimistic Locking):\nThread A: Reads row with amount 100$ in Transaction T1, it holds a row level lock. 
Thread B: Reads row in Transaction T2 but is blocked as T1 holds a lock, So it waits till timeout happens \u0026amp; retry. Thread A: Adds 10$, new amount is 110$ Thread B: Reads row with updated amount 110$ and updates to 120$\nTypes of locking\nPessimistic Locking - Locks held at row level or table level. Not ideal of high performance \u0026amp; cant scale. Optimistic Locking - Version field is added to the table, JPA ensures that version check is done before saving data, if the version has changed then update will throw Error. Ideal for high performance \u0026amp; can scale. Pessimistic locking\nLockModeType.PESSIMISTIC_READ - Rows are locked and can be read by other transactions, but they cannot be deleted or modified. PESSIMISTIC_READ guarantees repeatable reads. LockModeType.PESSIMISTIC_WRITE - Rows are locked and cannot be read, modified or deleted by other transactions. For PESSIMISTIC_WRITE no phantom reads can occur and access to data must be serialized. LockModeType.PESSIMISTIC_FORCE_INCREMENT - Rows are locked and cannot be read, modified or deleted by other transactions. it forces an increment of the version attribute Lock the row being read to avoid the same row from being updated by 2 different transactions\nselect * from table FOR SHARE - This clause locks the selected rows for read, other threads can read but cant modify. select * from table FOR UPDATE - This clause locks the selected rows for update. This prevents other transactions from reading/modifying these rows until the current transaction is completed (committed or rolled back) select * from table FOR UPDATE SKIP LOCKED clause - This clause tells the database to skip rows that are already locked by another transaction. Instead of waiting for the lock to be released\nOptimistic locking\nLockModeType.OPTIMISTIC - Checks the version attribute of the entity before committing the transaction to ensure no other transaction has modified the entity. 
LockModeType.OPTIMISTIC_FORCE_INCREMENT - Forces a version increment of the entity, even if the entity has not been modified during the update. Transaction Isolation\nTransaction isolation levels in JPA define the degree to which the operations within a transaction are isolated from the operations in other concurrent transactions JPA, typically using the underlying database and JDBC settings\nIsolation.READ_UNCOMMITTED Read Uncommitted - The lowest level of isolation. Transactions can read uncommitted changes made by other transactions. Isolation.READ_COMMITTED Read Committed - Transactions can only read committed changes made by other transactions. Isolation.REPEATABLE_READ Repeatable Read - If a transaction reads a row, it will get the same data if it reads the row again within the same transaction. Isolation.SERIALIZABLE Serializable - The highest level of isolation. Transactions are completely isolated from one another. Data Consistency\nDirty reads: read UNCOMMITED data from another transaction. Non-repeatable reads: read COMMITTED data from an UPDATE query from another transaction. Phantom reads: read COMMITTED data from an INSERT or DELETE query from another transaction. 
Dirty Read\nNAME AGE Bob 35 TRANSACTION T1 TRANSACTION T2 select age from table where name = 'Bob'; (35) update table set age = 40 where name = 'Bob'; select age from table where name = 'Bob'; (40) commit; Non-Repeatable Read\nNAME AGE Bob 35 TRANSACTION T1 TRANSACTION T2 select age from table where name = 'Bob'; (35) update table set age = 40 where name = 'Bob'; commit; select age from table where name = 'Bob'; (40) Phantom Read\nNAME AGE Bob 35 TRANSACTION T1 TRANSACTION T2 select count(*) from table where age = 35; (1) insert into table values ('jack', 35); commit; select count(*) from table where age = 35; (2) Behaviour of Isolation Levels\nIsolation Level Dirty Non-Repeatable Reads Phantom Reads Read Uncommitted Yes Yes Yes Read Committed No Yes Yes Repeatable Read No No Yes Serializable No No No 1spring: 2 jpa: 3 properties: 4 hibernate: 5 connection: 6 isolation: 2 1@Transactional(isolation = Isolation.SERIALIZABLE) 1SHOW default_transaction_isolation; Transaction Propagation\nWhen one transaction function calls another in the same class boundary then the parent transaction level is applied. You need to move the function to a different public class if you want its transaction to be enforced. When nested calls happen on transaction boundary then the transaction is suspended.\n@Transactional(readOnly = true) - transaction is readonly and no updates can happen. @Transactional(propagation = Propagation.REQUIRES_NEW) - creates a new transaction. @Transactional(propagation = Propagation.REQUIRED) - default, spring will create a new transaction if not present. @Transactional(propagation = Propagation.MANDATORY) - will throw exception if transaction doesn't exist. @Transactional(propagation = Propagation.SUPPORTS) - if existing transaction present then it will be used, else operation will happen without any transaction. @Transactional(propagation = Propagation.NOT_SUPPORTED) - operation will run with no transaction. 
@Transactional(propagation = Propagation.NEVER) - will throw an exception if transaction present. You can define which exceptions cause the rollback and which don't.\n1@Transactional(noRollbackFor = {CustomException.class}, rollbackFor = {RuntimeException.class}) To track transactions\n1logging: 2 level: 3 root: info 4 org.springframework.orm.jpa.JpaTransactionManager: DEBUG Spring keeps the transaction open till the controller returns the response. This is because it thinks that the object may be accessed later in the HTML (web mvc templates). We don't use this, so we will set the below property to false that way transaction is closed after @Transactional function ends.\n1spring: 2 jpa: 3 open-in-view: false By setting auto-commit to false spring won't commit immediately but will commit when the transaction ends.\n1spring: 2 datasource: 3 hikari: 4 auto-commit: false You can also use TransactionTemplate to control transactions if you don't want to use @Transactional and want more control. Try to keep the transaction boundary small. 
External calls need to be done outside the transaction context.\n1transactionTemplate.executeWithoutResult() 2transactionTemplate.execute() Code 1package com.demo.project82; 2 3import java.util.concurrent.CountDownLatch; 4import java.util.concurrent.ExecutorService; 5import java.util.concurrent.Executors; 6import java.util.concurrent.TimeUnit; 7 8import com.demo.project82._29_pessimistic_locking.Student29; 9import com.demo.project82._29_pessimistic_locking.repo.Student29Repository; 10import com.demo.project82._29_pessimistic_locking.service.Student29Service; 11import com.demo.project82._30_optimistic_locking.Student30; 12import com.demo.project82._30_optimistic_locking.repo.Student30Repository; 13import com.demo.project82._30_optimistic_locking.service.Student30Service; 14import com.demo.project82._32_transaction.Student32; 15import com.demo.project82._32_transaction.repo.Student32Repository; 16import com.demo.project82._32_transaction.service.Student32Service; 17import lombok.RequiredArgsConstructor; 18import lombok.SneakyThrows; 19import lombok.extern.slf4j.Slf4j; 20import org.springframework.boot.CommandLineRunner; 21import org.springframework.boot.SpringApplication; 22import org.springframework.boot.autoconfigure.SpringBootApplication; 23import org.springframework.context.annotation.Bean; 24import org.springframework.data.jpa.repository.config.EnableJpaAuditing; 25 26@SpringBootApplication 27@EnableJpaAuditing 28@RequiredArgsConstructor 29@Slf4j 30public class Main { 31 32 final Student29Repository student29Repository; 33 final Student30Repository student30Repository; 34 final Student32Repository student32Repository; 35 36 final Student29Service student29Service; 37 final Student30Service student30Service; 38 final Student32Service student32Service; 39 40 /** 41 * You can also use @Async instead of using thread pool. 
42 */ 43 ExecutorService threadPool = Executors.newCachedThreadPool(); 44 45 public static void main(String[] args) { 46 SpringApplication.run(Main.class, args); 47 } 48 49 @Bean 50 public CommandLineRunner start() { 51 return (args) -\u0026gt; { 52 try { 53 log.info(\u0026#34;DB Created!\u0026#34;); 54 testOptimisticLocking(); 55 testPessimisticLocking(); 56 testTransaction(); 57 } finally { 58 threadPool.shutdown(); 59 threadPool.awaitTermination(5, TimeUnit.SECONDS); 60 } 61 }; 62 } 63 64 @SneakyThrows 65 public void testOptimisticLocking() { 66 Long studentId = 200l; 67 Student30 student = student30Repository.findById(studentId).orElseThrow(); 68 log.info(\u0026#34;[testOptimisticLocking] Student Before: {}\u0026#34;, student); 69 CountDownLatch latch = new CountDownLatch(2); 70 modifyStudent30(studentId, latch); 71 modifyStudent30(studentId, latch); 72 latch.await(10, TimeUnit.SECONDS); 73 student = student30Repository.findById(studentId).orElseThrow(); 74 log.info(\u0026#34;[testOptimisticLocking] Student After: {}\u0026#34;, student); 75 if (student.getUpdatedCount() != 1) { 76 //We check that only one transaction was applied. 77 throw new RuntimeException(\u0026#34;TEST_ERROR\u0026#34;); 78 } 79 if (student.getAmount() != 110) { 80 throw new RuntimeException(\u0026#34;TEST_ERROR\u0026#34;); 81 } 82 } 83 84 @SneakyThrows 85 public void testPessimisticLocking() { 86 Long studentId = 200l; 87 Student29 student = student29Repository.findById(studentId).orElseThrow(); 88 log.info(\u0026#34;[testPessimisticLocking] Student Before: {}\u0026#34;, student); 89 CountDownLatch latch = new CountDownLatch(2); 90 modifyStudent29(studentId, latch); 91 modifyStudent29(studentId, latch); 92 latch.await(20, TimeUnit.SECONDS); 93 student = student29Repository.findById(studentId).orElseThrow(); 94 log.info(\u0026#34;[testPessimisticLocking] Student After: {}\u0026#34;, student); 95 if (student.getUpdatedCount() != 1) { 96 //We check that only one transaction was applied. 
97 throw new RuntimeException(\u0026#34;TEST_ERROR\u0026#34;); 98 } 99 if (student.getAmount() != 110) { 100 throw new RuntimeException(\u0026#34;TEST_ERROR\u0026#34;); 101 } 102 } 103 104 @SneakyThrows 105 public void testTransaction() { 106 Long studentId = 200l; 107 Student32 student = student32Repository.findById(200l).orElseThrow(); 108 log.info(\u0026#34;[testTransaction] Student Before: {}\u0026#34;, student); 109 CountDownLatch latch = new CountDownLatch(1); 110 modifyStudent32(studentId, latch); 111 latch.await(1, TimeUnit.SECONDS); 112 student = student32Repository.findById(studentId).orElseThrow(); 113 log.info(\u0026#34;[testTransaction] Student After: {}\u0026#34;, student); 114 if (!student.getStudentName().equals(\u0026#34;raj\u0026#34;)) { 115 //We check that modification didn\u0026#39;t happen as we used readonly entity 116 throw new RuntimeException(\u0026#34;TEST_ERROR\u0026#34;); 117 } 118 } 119 120 private void modifyStudent29(Long id, CountDownLatch latch) { 121 threadPool.submit(() -\u0026gt; { 122 student29Service.modifyStudent29(id, latch); 123 }); 124 } 125 126 private void modifyStudent30(Long id, CountDownLatch latch) { 127 threadPool.submit(() -\u0026gt; { 128 student30Service.modifyStudent30(id, latch); 129 }); 130 } 131 132 private void modifyStudent32(Long id, CountDownLatch latch) { 133 threadPool.submit(() -\u0026gt; { 134 student32Service.modifyStuden32(id, latch); 135 }); 136 } 137 138} Setup 1# Project82 2 3Spring Data JPA Essentials 4 5### Version 6 7Check version 8 9```bash 10$java --version 11openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 12``` 13 14### Postgres DB 15 16``` 17docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:14 18docker ps 19docker exec -it pg-container psql -U postgres -W postgres 20CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 21CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 
22grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 23 24docker stop pg-container 25docker start pg-container 26``` 27 28There is a bug in spring data jpa where `jakarta.persistence.lock.timeout` is not working for postgres. 29Hence set the timeout at database level to 10 seconds. 30 31```bash 32ALTER DATABASE \u0026#34;test-db\u0026#34; SET lock_timeout=10000; 33``` 34 35To look at isolation level 36 37```bash 38SHOW default_transaction_isolation; 39ALTER DATABASE \u0026#34;test-db\u0026#34; SET default_transaction_isolation = \u0026#39;read committed\u0026#39; 40``` 41 42### Dev 43 44To run the backend in dev mode. 45 46```bash 47./gradlew clean build 48./gradlew bootRun 49``` References https://spring.io/projects/spring-data-jpa\n","link":"https://gitorko.github.io/post/optimistic-pessimistic-locking/","section":"post","tags":["optimistic-locking","pessimistic-locking","JPA"],"title":"Spring JPA - Optimistic vs Pessimistic Locking"},{"body":"","link":"https://gitorko.github.io/tags/transactional/","section":"tags","tags":null,"title":"Transactional"},{"body":"","link":"https://gitorko.github.io/categories/actuator/","section":"categories","tags":null,"title":"Actuator"},{"body":"","link":"https://gitorko.github.io/tags/grafana/","section":"tags","tags":null,"title":"Grafana"},{"body":"","link":"https://gitorko.github.io/categories/grafana/","section":"categories","tags":null,"title":"Grafana"},{"body":"","link":"https://gitorko.github.io/tags/jmx/","section":"tags","tags":null,"title":"Jmx"},{"body":"","link":"https://gitorko.github.io/tags/micrometer/","section":"tags","tags":null,"title":"Micrometer"},{"body":"","link":"https://gitorko.github.io/categories/micrometer/","section":"categories","tags":null,"title":"MicroMeter"},{"body":"","link":"https://gitorko.github.io/tags/observability/","section":"tags","tags":null,"title":"Observability"},{"body":"","link":"https://gitorko.github.io/categories/observability/","section":"categories","tags
":null,"title":"Observability"},{"body":"","link":"https://gitorko.github.io/tags/prometheus/","section":"tags","tags":null,"title":"Prometheus"},{"body":"","link":"https://gitorko.github.io/categories/prometheus/","section":"categories","tags":null,"title":"Prometheus"},{"body":"Spring Boot Observability\nGithub: https://github.com/gitorko/project71\nApplication monitoring can be classified into\nObservability - Creates metrics, traces, logs that get stored in time-series db and charts are created. eg: Prometheus, Grafana, OpenTelemetry, Jaeger, Zipkin APM (Application Performance Management) - Runs an agents in the jvm that instruments the bytecode and sends metrics to a remote server, focuses on performance and user experience in the application layer. eg: New Relic, Datadog APM, AppDynamics, Dynatrace, Elastic APM Monitoring - Checks endpoint uri to monitor health (cpu, memory) infrastructure-centric for alerting. eg: Nagios, Prometheus Observability Observability is the ability to observe the internal state of a running system from the outside. Observability has 3 pillars\nMetrics: Quantitative data about system performance (e.g., CPU usage, request count, error rates) eg: spring-boot-starter-actuator. Logs: Event-based data for tracking specific actions and events with correlation/span id (e.g., application logs) eg: micrometer-tracing-bridge-brave. Traces: Distributed tracing for tracking the path of a request across services (e.g., tracing API calls) eg: zipkin-reporter-brave. Various tools that help in observability\nPrometheus - An open-source systems monitoring and alerting tool. Prometheus scrapes/collects metrics from an endpoint at regular intervals. Stores the data in a time series database. Grafana - A visualization tool, can pull data from multiple sources (Prometheus) and shows them in graphs. Zipkin - A distributed tracing system. It helps gather timing data needed to troubleshoot latency problems in service architectures. 
Micrometer is a vendor-neutral instrumentation library that allows you to collect metrics and traces for observability.\nMetrics Collection - Supports Prometheus, Graphite, Datadog, New Relic, etc. Tracing Support - Used with Brave (Zipkin), OpenTelemetry, Wavefront, etc. Logging Context Propagation - Adds tracing IDs in logs for better debugging Spring Integration - Works out of the box with Spring Boot’s Actuator Spring Observability internally uses Micrometer, so in Spring Boot 3+, you should use Spring Observability APIs for new projects\nLogging Tracing adds spans/traces to all logs.\nMetrics A Meter consists of a name and tags, There are 4 main types of meters.\nTimers - Time taken to run something. Counter - Number of time something was run. Gauge - Report data when observed. Gauges can be useful when monitoring stats of cache, collections Distribution summary - Distribution of events. Binders - Built-in binders to monitor the JVM, caches, ExecutorService, and logging services Distributed Tracing Spring Boot samples only 10% of requests to prevent overwhelming the trace backend. 
Change probability to 1.0 so that every request is sent to the trace backend.\n1management: 2 tracing: 3 sampling: 4 probability: 1.0 Code 1package com.demo.project71.service; 2 3import java.util.Random; 4import java.util.concurrent.TimeUnit; 5 6import com.demo.project71.config.RegistryConfig; 7import io.micrometer.core.annotation.Timed; 8import io.micrometer.observation.Observation; 9import io.micrometer.observation.ObservationRegistry; 10import io.micrometer.observation.annotation.Observed; 11import lombok.RequiredArgsConstructor; 12import lombok.SneakyThrows; 13import lombok.extern.slf4j.Slf4j; 14import org.springframework.context.annotation.Configuration; 15import org.springframework.scheduling.annotation.Async; 16import org.springframework.scheduling.annotation.EnableAsync; 17import org.springframework.stereotype.Service; 18 19@Service 20@RequiredArgsConstructor 21@Slf4j 22@Configuration 23@EnableAsync 24public class GreetService { 25 26 final ObservationRegistry observationRegistry; 27 28 public String sayHello1() { 29 return Observation.createNotStarted(\u0026#34;sayHello1\u0026#34;, observationRegistry).contextualName(\u0026#34;greet.hello-1\u0026#34;).observe(() -\u0026gt; { 30 log.info(\u0026#34;Hello World 1!\u0026#34;); 31 return \u0026#34;Hello World 1!\u0026#34;; 32 }); 33 } 34 35 @Observed(contextualName = \u0026#34;greet.hello-2\u0026#34;) 36 public String sayHello2() { 37 log.info(\u0026#34;Hello World 2!\u0026#34;); 38 return \u0026#34;Hello World 2!\u0026#34;; 39 } 40 41 public String sayHello3() { 42 return Observation.createNotStarted(\u0026#34;greet.hello-3\u0026#34;, observationRegistry).observe(this::sayHello3_NoObs); 43 } 44 45 public String sayHello3_NoObs() { 46 log.info(\u0026#34;Hello World 3!\u0026#34;); 47 return \u0026#34;Hello World 3!\u0026#34;; 48 } 49 50 @Timed(\u0026#34;greet.sayHello4\u0026#34;) 51 @SneakyThrows 52 public String sayHello4() { 53 RegistryConfig.helloApiCounter.increment(); 54 log.info(\u0026#34;Hello World 
4!\u0026#34;); 55 int sleepTime = new Random().nextInt(5); 56 log.info(\u0026#34;Sleeping for seconds: {}\u0026#34;, sleepTime); 57 TimeUnit.SECONDS.sleep(sleepTime); 58 return \u0026#34;Hello World 4!\u0026#34;; 59 } 60 61 @SneakyThrows 62 public String sayHello5() throws InterruptedException { 63 log.info(\u0026#34;sayHello5 start - Original span\u0026#34;); 64 inner1(); 65 inner2(); 66 log.info(\u0026#34;sayHello5 end - Original span\u0026#34;); 67 return \u0026#34;Hello World 5!\u0026#34;; 68 } 69 70 public void inner1() { 71 Observation.createNotStarted(\u0026#34;inner1\u0026#34;, observationRegistry).observe(() -\u0026gt; { 72 log.info(\u0026#34;Inner1\u0026#34;); 73 }); 74 } 75 76 public void inner2() { 77 Observation.start(\u0026#34;inner2\u0026#34;, observationRegistry).observe(() -\u0026gt; { 78 log.info(\u0026#34;Inner2\u0026#34;); 79 }); 80 } 81 82 /** 83 * You can add additional values 84 * Low cardinality tags will be added to metrics and traces, while high cardinality tags will only be added to traces. 
85 */ 86 public String sayHello6() { 87 return Observation.createNotStarted(\u0026#34;sayHello1\u0026#34;, observationRegistry) 88 .lowCardinalityKeyValue(\u0026#34;locale\u0026#34;, \u0026#34;en-US\u0026#34;) 89 .highCardinalityKeyValue(\u0026#34;userId\u0026#34;, \u0026#34;42\u0026#34;) 90 .contextualName(\u0026#34;greet.hello-6\u0026#34;) 91 .observe(() -\u0026gt; { 92 log.info(\u0026#34;Hello World 6!\u0026#34;); 93 return \u0026#34;Hello World 6!\u0026#34;; 94 }); 95 } 96 97 @Async 98 @SneakyThrows 99 public void asyncHello() { 100 log.info(\u0026#34;Start Async Method\u0026#34;); 101 TimeUnit.SECONDS.sleep(1); 102 log.info(\u0026#34;End Async Method\u0026#34;); 103 } 104} 1package com.demo.project71.config; 2 3import io.micrometer.core.aop.TimedAspect; 4import io.micrometer.core.instrument.Counter; 5import io.micrometer.core.instrument.MeterRegistry; 6import io.micrometer.core.instrument.Metrics; 7import jakarta.annotation.PostConstruct; 8import org.springframework.beans.factory.annotation.Value; 9import org.springframework.boot.actuate.autoconfigure.metrics.MeterRegistryCustomizer; 10import org.springframework.context.annotation.Bean; 11import org.springframework.context.annotation.Configuration; 12import org.springframework.context.annotation.EnableAspectJAutoProxy; 13 14@Configuration 15@EnableAspectJAutoProxy 16public class RegistryConfig { 17 18 public static Counter helloApiCounter; 19 20 /** 21 * Applies common tags on all Meters 22 */ 23 @Bean 24 MeterRegistryCustomizer\u0026lt;MeterRegistry\u0026gt; configurer(@Value(\u0026#34;${spring.application.name}\u0026#34;) String applicationName) { 25 return registry -\u0026gt; registry.config().commonTags(\u0026#34;application\u0026#34;, applicationName); 26 } 27 28 /** 29 * Enables @Timed annotation 30 */ 31 @Bean 32 public TimedAspect timedAspect(MeterRegistry registry) { 33 return new TimedAspect(registry); 34 } 35 36 /** 37 * Creates a Meter 38 */ 39 @PostConstruct 40 public void postInit() { 41 
helloApiCounter = Metrics.counter(\u0026#34;hello.api.count\u0026#34;, \u0026#34;type\u0026#34;, \u0026#34;order\u0026#34;); 42 } 43} 1package com.demo.project71.config; 2 3import java.util.concurrent.Executor; 4import java.util.concurrent.Executors; 5 6import io.micrometer.context.ContextExecutorService; 7import io.micrometer.context.ContextSnapshotFactory; 8import lombok.RequiredArgsConstructor; 9import org.springframework.context.annotation.Configuration; 10import org.springframework.scheduling.annotation.AsyncConfigurer; 11 12@Configuration 13@RequiredArgsConstructor 14class ThreadConfig implements AsyncConfigurer { 15 16 @Override 17 public Executor getAsyncExecutor() { 18 return ContextExecutorService.wrap(Executors.newFixedThreadPool(5), ContextSnapshotFactory.builder().build()::captureAll); 19 } 20 21} Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project 71 2 3Spring Observability 4 5[https://gitorko.github.io/spring-boot-observability/](https://gitorko.github.io/spring-boot-observability/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 21.0 14``` 15 16### Postgres DB 17 18``` 19docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:9.6.10 20docker ps 21docker exec -it pg-container psql -U postgres -W postgres 22CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 23CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 24grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 25 26docker stop pg-container 27docker start pg-container 28``` 29 30### Zipkin 31 32To run zipkin server use the docker command 33 34```bash 35docker run -d -p 9411:9411 --name my-zipkin openzipkin/zipkin 36 37docker stop my-zipkin 38docker start my-zipkin 39``` 40 41Login to zipkin UI, wait for few seconds for server to be up. 
42 43[http://localhost:9411/zipkin/](http://localhost:9411/zipkin/) 44 45## Prometheus 46 47Update the target ip-address in the prometheus.yml file, don\u0026#39;t use localhost when using docker container 48 49To start the prometheus docker instance build the docker image \u0026amp; run the image. 50 51```bash 52cd project71 53docker build -f docker/Dockerfile --force-rm -t my-prometheus . 54docker run -d -p 9090:9090 --name my-prometheus my-prometheus 55 56docker stop my-prometheus 57docker start my-prometheus 58``` 59 60[http://localhost:9090](http://localhost:9090) 61 62## Grafana 63 64To start the grafana docker instance run the command. 65 66```bash 67docker run --name my-grafana -d -p 3000:3000 grafana/grafana 68 69docker stop my-grafana 70docker start my-grafana 71``` 72 73[http://localhost:3000](http://localhost:3000) 74 75``` 76user: admin 77password: admin 78``` 79 80### Dev 81 82To run the code. 83 84```bash 85./gradlew clean build 86./gradlew bootRun 87``` Open zipkin dashboard\nhttp://localhost:9411/zipkin/\nOpen prometheus dashboard\nhttp://localhost:9090\nOpen grafana dashboard\nhttp://localhost:3000\n1user: admin 2password: admin Add the prometheus data source, make sure it's the ip address of your system, don't add localhost\nhttp://IP-ADDRESS:9090\nThere are existing grafana dashboards that can be imported. 
Import a dashboard, Download the json file or copy the ID of the dashboard for micrometer dashboard.\nhttps://grafana.com/dashboards/4701\nCreate a custom dashboard, Add a new panel, add 'hello_api_count_total' metric in the query, save the dashboard.\nReferences https://micrometer.io/docs\nhttps://prometheus.io/\nhttps://grafana.com/\nhttps://grafana.com/grafana/dashboards/4701\nhttps://grafana.com/grafana/dashboards/\n","link":"https://gitorko.github.io/post/spring-observability/","section":"post","tags":["spring","spring-boot","prometheus","grafana","jmx","micrometer","observability","tracing","spring-security"],"title":"Spring Boot - Observability"},{"body":"","link":"https://gitorko.github.io/tags/spring-security/","section":"tags","tags":null,"title":"Spring-Security"},{"body":"","link":"https://gitorko.github.io/tags/tracing/","section":"tags","tags":null,"title":"Tracing"},{"body":"","link":"https://gitorko.github.io/categories/tracing/","section":"categories","tags":null,"title":"Tracing"},{"body":"","link":"https://gitorko.github.io/tags/auditing/","section":"tags","tags":null,"title":"Auditing"},{"body":"","link":"https://gitorko.github.io/tags/checkstyle/","section":"tags","tags":null,"title":"Checkstyle"},{"body":"","link":"https://gitorko.github.io/tags/jacoco/","section":"tags","tags":null,"title":"Jacoco"},{"body":"","link":"https://gitorko.github.io/tags/jdk21/","section":"tags","tags":null,"title":"Jdk21"},{"body":"","link":"https://gitorko.github.io/categories/jdk21/","section":"categories","tags":null,"title":"JDK21"},{"body":"","link":"https://gitorko.github.io/tags/spotbugs/","section":"tags","tags":null,"title":"Spotbugs"},{"body":"Virtual Threads Virtual threads (Project Loom) is part of Java 21. Any blocking operation doesn't cause the thread to block. 
Thread Pool are replaced with a virtual thread executor.\nGithub: https://github.com/gitorko/project58\nTo enable virtual threads in spring boot application\n1spring.threads.virtual.enabled=true Code 1package com.demo.project58.controller; 2 3import java.util.List; 4import java.util.UUID; 5 6import com.demo.project58.pojo.Customer; 7import com.demo.project58.service.CustomerService; 8import lombok.extern.slf4j.Slf4j; 9import org.springframework.beans.factory.annotation.Autowired; 10import org.springframework.data.domain.Page; 11import org.springframework.data.domain.PageRequest; 12import org.springframework.http.ResponseEntity; 13import org.springframework.transaction.annotation.Transactional; 14import org.springframework.web.bind.annotation.DeleteMapping; 15import org.springframework.web.bind.annotation.GetMapping; 16import org.springframework.web.bind.annotation.PathVariable; 17import org.springframework.web.bind.annotation.PostMapping; 18import org.springframework.web.bind.annotation.PutMapping; 19import org.springframework.web.bind.annotation.RequestBody; 20import org.springframework.web.bind.annotation.RequestMapping; 21import org.springframework.web.bind.annotation.RequestParam; 22import org.springframework.web.bind.annotation.RestController; 23import org.springframework.web.client.RestClient; 24 25@RestController 26@RequestMapping(\u0026#34;/customer\u0026#34;) 27@Slf4j 28public class CustomerController { 29 30 @Autowired 31 private CustomerService customerService; 32 33 @Autowired 34 private RestClient restClient; 35 36 @GetMapping(\u0026#34;/greet/{name}\u0026#34;) 37 public String greet(@PathVariable String name) { 38 return customerService.greet(name); 39 } 40 41 @PostMapping(\u0026#34;/save\u0026#34;) 42 @Transactional 43 public Customer saveCustomer(@RequestBody Customer customer) { 44 return customerService.save(customer); 45 } 46 47 @GetMapping(\u0026#34;/all\u0026#34;) 48 public List\u0026lt;Customer\u0026gt; findAll() { 49 return 
customerService.findAll(); 50 } 51 52 @GetMapping(\u0026#34;/{id}\u0026#34;) 53 public Customer findById(@PathVariable UUID id) { 54 return customerService.findById(id); 55 } 56 57 @PutMapping(value = \u0026#34;/update\u0026#34;) 58 public Customer update(@RequestBody Customer customer) { 59 return customerService.save(customer); 60 } 61 62 @DeleteMapping(value = \u0026#34;/{id}\u0026#34;) 63 public void delete(@PathVariable UUID id) { 64 customerService.deleteById(id); 65 } 66 67 @GetMapping(\u0026#34;/find\u0026#34;) 68 public List\u0026lt;Customer\u0026gt; find(@RequestParam String name, @RequestParam Integer age) { 69 return customerService.findByNameAndAge(name, age); 70 } 71 72 @GetMapping(\u0026#34;/page\u0026#34;) 73 public Page\u0026lt;Customer\u0026gt; findPage(@RequestParam(\u0026#34;page\u0026#34;) int page, @RequestParam(\u0026#34;size\u0026#34;) int size) { 74 return customerService.findAll(PageRequest.of(page, size)); 75 } 76 77 @GetMapping(\u0026#34;/search\u0026#34;) 78 public List\u0026lt;Customer\u0026gt; search(Customer customer) { 79 return customerService.search(customer); 80 } 81 82 @GetMapping(\u0026#34;/block/{seconds}\u0026#34;) 83 public String block(@PathVariable Integer seconds) { 84 ResponseEntity\u0026lt;Void\u0026gt; result = restClient.get() 85 .uri(\u0026#34;/delay/\u0026#34; + seconds) 86 .retrieve() 87 .toBodilessEntity(); 88 89 log.info(\u0026#34;{} on {}\u0026#34;, result.getStatusCode(), Thread.currentThread()); 90 return Thread.currentThread().toString(); 91 } 92} 1spring: 2 threads: 3 virtual: 4 enabled: true 5 task: 6 execution: 7 simple: 8 concurrency-limit: 10 9 scheduling: 10 simple: 11 concurrency-limit: 10 12 main: 13 banner-mode: \u0026#34;off\u0026#34; 14 datasource: 15 driver-class-name: org.postgresql.Driver 16 url: jdbc:postgresql://localhost:5432/test-db 17 username: test 18 password: test@123 19 jpa: 20 show-sql: false 21 liquibase: 22 enabled: true 23 change-log: db/changelog/db.changelog-main.yaml 24logging: 
25 level: 26 org.springframework.data.jpa: DEBUG Setup 1# Project 58 2 3Spring Virtual Threads \u0026amp; Unit Testing 4 5Checkstyle, SpotBugs, JaCoCo code coverage 6 7[https://gitorko.github.io/spring-virtual-threads/](https://gitorko.github.io/spring-virtual-threads/) 8 9### Version 10 11Check version 12 13```bash 14$java --version 15openjdk 21 16``` 17 18Check apache benchmark 19```bash 20$ ab -V 21This is ApacheBench, Version 2.3 \u0026lt;$Revision: 1903618 $\u0026gt; 22``` 23 24### Postgres DB 25 26``` 27docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:9.6.10 28docker ps 29docker exec -it pg-container psql -U postgres -W postgres 30CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 31CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 32grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 33 34docker stop pg-container 35docker start pg-container 36``` 37 38### Dev 39 40To build the code. 
41 42```bash 43./gradlew clean build 44``` 45 46```bash 47ab -n 10 -c 2 http://localhost:8080/customer/block/3 48``` References https://spring.io/blog/2022/10/11/embracing-virtual-threads\n","link":"https://gitorko.github.io/post/spring-virtual-threads/","section":"post","tags":["virtual-threads","spring","jdk21","auditing","liquibase","jacoco","spotbugs","checkstyle"],"title":"Spring Virtual Threads"},{"body":"","link":"https://gitorko.github.io/tags/virtual-threads/","section":"tags","tags":null,"title":"Virtual-Threads"},{"body":"","link":"https://gitorko.github.io/categories/virtualthreads/","section":"categories","tags":null,"title":"VirtualThreads"},{"body":"","link":"https://gitorko.github.io/tags/azul/","section":"tags","tags":null,"title":"Azul"},{"body":"","link":"https://gitorko.github.io/tags/correto/","section":"tags","tags":null,"title":"Correto"},{"body":"","link":"https://gitorko.github.io/tags/garbage-collector/","section":"tags","tags":null,"title":"Garbage-Collector"},{"body":"You write code in Java once, compile it to bytecode, then it can be run on any machine by the JVM where bytecode is interpreted for the underlying platforms/OS code. (\u0026quot;write once, run anywhere\u0026quot;) JVM can support other languages like Scala, Kotlin and Groovy\nJava Virtual Machine (JVM) Architecture Components of JVM\nClass Loader Runtime Memory/Data Area Execution Engine Class Loader Three phases\nLoading Linking Initialization Loading\nThree built-in class loaders:\nBootstrap - Root class loader. Loads libraries which are present in the rt.jar \u0026amp; $JAVA_HOME/jre/lib. eg: java.lang, java.net, java.util, java.io Extension - Loads libraries which are present in the $JAVA_HOME/jre/lib/ext Application - Loads classes present on the classpath. The JVM uses the ClassLoader.loadClass() or Class.forName() method for loading the class into memory. It tries to load the class based on a fully qualified name. 
The first class to be loaded into memory is usually the class that contains the main() method.\nHierarchical class loading\nLinking\nVerification - Checks the structural correctness of the .class file. eg: Running java11 on java8 we get VerifyException Preparation - Allocates memory for the static fields \u0026amp; initializes them with default values. Resolution - Symbolic references are replaced with direct references. Initialization\nExecutes the initialization method of the class eg: constructor, executing the static block, assigning values to all the static variables etc.\nRuntime Data Area Five components:\nMethod Area - Class level data such as the run-time constant pool, field, and method data, and the code for methods and constructors, are stored here Heap Area - Objects and their corresponding instance variables are stored here Stack Area - When a new thread is created, a separate runtime stack is also created at the same time. All local variables, method calls, and partial results are stored in the stack area. Program Counter (PC) Registers - Each thread has its own PC Register to hold the address of the currently executing JVM instruction. Native Method Stacks - JVM contains stacks that support native methods Execution Engine JVM can use an interpreter or a JIT compiler for the execution engine\nInterpreter - Reads and executes the bytecode instructions line by line, slower JIT Compiler - Compiles the entire bytecode and changes it to native machine code, Uses the interpreter to execute the byte code, but when it finds some repeated code, it uses the JIT compiler Garbage Collector Garbage collection is the process of automatically reclaiming unused memory by destroying unused object. 
Garbage collection provides automation memory management in java.\nObjects get created on the heap.\nLive - Objects are being used and referenced from somewhere else Dead - Objects are no longer used or referenced from anywhere To make a live object dead\nMake the reference null.\n1Customer customer = new Customer(); 2customer = null; Assign reference to another object.\n1Customer customer = new Customer(); 2customer = new Customer(); Use anonymous object.\n1myFunction(new Customer()); All objects are linked to a Garbage Root Object via graph. Garbage collector traverses the whole object graph in memory, starting from root and following references from the roots to other objects.\nPhases of Garbage Collection:\nMark - GC identifies the unused objects in memory Sweep - GC removes the objects identified during the previous phase Compact - Compacts fragmented space so that objects are in contiguous block Garbage Collections is done automatically by the JVM at regular intervals. It can also be triggered by calling System.gc(), but the execution is not guaranteed.\nGenerational garbage collection strategy that categorizes objects by age and moves them to different region.\nJVM is divided into three sections\nYoung Generation Old Generation Permanent Generation Young Generation\nNewly created objects start in the Young Generation. When objects are garbage collected from the Young Generation, it is a minor garbage collection event. When surviving objects reach a certain threshold of moving around the survivor spaces, they are moved to the Old Generation. Use the -Xmn flag to set the size of the Young Generation\nThe Young Generation is further subdivided\nEden space - All new objects start here, and initial memory is allocated to them Survivor spaces - Objects are moved here from Eden after surviving one garbage collection cycle. 
Old Generation\nObjects that are long-lived are eventually moved from the Young Generation to the Old Generation When objects are garbage collected from the Old Generation, it is a major garbage collection event.\nUse the -Xms and -Xmx flags to set the size of the initial and maximum size of the Heap memory.\nPermanent Generation\nDeprecated since java 8 Metadata of classes and methods are stored in perm-gen.\nMetaSpace\nStarting with Java 8, the MetaSpace memory space replaces the PermGen space. Metaspace is automatically resized hence applications won't run out of memory if the classes are big.\nPhases of GC\nMinor GC - Happens on Young generation. Major GC - Happens on Old generation. Stop of the world event, program pauses till memory is cleaned. Least pause time is always preferred. Algorithms\nMark-Copy - Happens in Young generation Marks all live objects Then copies from eden space to survivor space (S1/S2), At any given point either S1 or S2 is always empty. Then entire eden space is treated as empty. Mark-Sweep-Compact - Happens in Old generation. Marks all live objects. Sweep/Reclaim all dead object. Releases memory Compaction - Move all live objects to left so that are next to each other in continuous block. Types of garbage collector:\n-XX:+UseSerialGC - Serial garbage collector. Single thread for both minor \u0026amp; major gc. XX:+UseParallelGC - Parallel garbage collector. Multiple thread for both minor gc \u0026amp; single/multiple thread for major gc. Doesn't run concurrently with application. The pause time is longest. eg: Batch jobs XX:+UseConcMarkSweepGC - CMS (Concurrent Mark \u0026amp; Sweep) Deprecated since java 9. Multiple thread for both minor \u0026amp; major gc. Concurrent Mark \u0026amp; Sweep. Runs concurrently with application to mark live objects. The pause time is minimal. eg: CPU intensive. -XX:+UseG1GC - G1 (Garbage first) garbage collector. Entire heap is divided to multiple regions that can be resized. 
A region can be either young or old. Identifies the regions with the most garbage and performs garbage collection on that region first, it is called Garbage First The pause time is predictable as regions are small. -XX:+UseEpsilonGC - Epsilon collector - Do nothing collector. JVM shutsdown once heap is full. Used for zero pause time application provided memory is planned. -XX:+UseShenandoahGC - Shenandoah collector - Similar to G1, but runs concurrently with application. CPU intensive. -XX:+UseZGC - ZGC collector - Suitable for low pause time (2 ms pauses) and large heap. GC performed while application running. Treats the entire heap as a single generation, performing garbage collection uniformly across all objects. Cant specify pause time. -XX:+UseZGC -XX:+ZGenerational Generation ZGC - ZGC splits the heap into two logical generations young and old. The GC can focus on collecting younger and more promising objects more often without increasing pause time, keeping them under 1 millisecond Garbage Collectors When to use Serial Small data sets (~100 MB max)Limited resources (e.g., single core)Low pause times Parallel Peak performance on multi-core systemsWell suited for high computational loads more than 1-second pauses are acceptable G1 /CMS Response time \u0026gt; throughputLarge heapPauses \u0026lt; 1 sec Shenandoah Minimize pause timesPredicatable latencies ZGC Response time is high-priority, and/orVery large heap Epsilon GC Performance testing and troubleshooting Java Native Interface (JNI) Java supports the execution of native code via the Java Native Interface (JNI). Use the native keyword to indicate that the method implementation will be provided by a native library. Invoke System.loadLibrary() to load the shared native library into memory.\nNative Method Libraries Libraries that are written in other programming languages, such as C, C++, and assembly. These libraries are usually present in the form of .dll or .so files. 
These native libraries can be loaded through JNI.\nJVM errors ClassNotFoundExcecption - Class Loader is trying to load classes using Class.forName(), ClassLoader.loadClass() or ClassLoader.findSystemClass() but no definition for the class with the specified name is found. NoClassDefFoundError - Compiler has successfully compiled the class, but the Class Loader is not able to locate the class file at the runtime. OutOfMemoryError - Cannot allocate an object because it is out of memory, and no more memory could be made available by the garbage collector. StackOverflowError - Ran out of space while creating new stack frames while processing a thread. JVM Flags Throughput - Throughput refers to the amount of time spent on actual application work versus the total time spent on garbage collection activities. High Throughput indicates that the application spends more time executing its tasks rather than performing GC Latency - Latency in GC refers to the pause times experienced by the application during garbage collection activities. Low Latency indicates that GC pauses are short and predictable, allowing the application to quickly resume its tasks Flags Purpose -XX:+UseStringDeduplication All string in java are stored in string pool but if you used the new operator then string are created on heap. To remove duplicate string in heap during GC you can use this flag. -XX:+SoftMaxHeapSize Set in Gen ZGC it allows GC to operate within this limit and will goto max only to prevent application from stalling. -XX:+AlwaysPreTouch Heap preparation is done at startup. 
-Xmx=\u0026lt;size\u0026gt; -Xms=\u0026lt;size\u0026gt; The max heap size \u0026amp; min heap size can be set to same size to avoid latency caused when returning unused memory to RAM by the JVM -XX:-ZUncommit Prevents unused memory from being returned to the RAM by the JVM -XX:ZUncommitDelay=\u0026lt;time\u0026gt; Delay before unused memory returned to the RAM by the JVM -XX:+UseTransparentHugePages If you have large objects then dedicated section of the heap is used to store them. OS should support Transparent Huge Pages (THP) -XX:MaxGCPauseMillis Provides max tolerable pause time for GC in G1 -XX:+UseAOT Enables AOT compilation -XX:ParallelGCThreads Parallel GC Threads -XX:ConcGCThreads Concurrent GC Threads -Xss Thread stack size -XX:+DisableExplicitGC Disable GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=\u0026lt;file\u0026gt; Heap Dump on OutOfMemoryError -XX:MetaspaceSize=\u0026lt;size\u0026gt; -XX:MaxMetaspaceSize=\u0026lt;size\u0026gt; Metaspace size -XX:G1HeapRegionSize=\u0026lt;size\u0026gt; G1 region size -XX:ZAllocationSpikeTolerance=\u0026lt;value\u0026gt; How much ZGC should over-provision memory to handle allocation spikes -Xlog:gc* GC logging -XX:ZGenYoungSize=\u0026lt;size\u0026gt; Gen-ZGC Young Generation Size -XX:ZGenYoungSize=\u0026lt;size\u0026gt; Gen-ZGC Old Generation Size -XX:ZGenMaxYoungGCCollectionTimeMillis=\u0026lt;time\u0026gt; Max Young GC Pause Time JIT (Just in Time Compilation) vs AOT (Ahead of Time Compilation) JIT happens at runtime, jvm determines hotspot in code that are frequently executed and compiles to native code. eg: HotSpot JVM AOT happens before runtime, bytecode is compiled to native code based on static analysis before program is executed. 
eg: GraalVM\nDistributions Azul Platform Core https://www.azul.com/downloads/ Amazon Corretto https://aws.amazon.com/corretto/ Red Hat OpenJDK https://developers.redhat.com/products/openjdk/download Eclipse Temurin https://adoptium.net/en-GB/temurin/releases/ Versions To deal with different versions of java you can use jenv\n1brew install jenv Set the jdk version either globally or locally\n1jenv versions 2jenv version 3jenv global 11.0 4jenv local 17.0 To add existing jdk\n1jenv add /Users/username/Library/Java/JavaVirtualMachines/azul-17.0.11/Contents/Home Tools You can also download the various tools needed to work with java\nVisualVM - https://visualvm.github.io/ Memory Analyzer - https://www.eclipse.org/mat/ Mission Control - https://www.azul.com/products/components/azul-mission-control/ References https://www.youtube.com/watch?v=XXOaCV5xm9s\u0026amp;ab_channel=Geekific https://www.youtube.com/watch?v=2PIBF92iOvQ\u0026amp;ab_channel=Java https://www.youtube.com/watch?v=wpkbJGRCwRo\u0026amp;ab_channel=Java\nhttps://docs.oracle.com/en/java/javase/21/gctuning/z-garbage-collector.html#GUID-8637B158-4F35-4E2D-8E7B-9DAEF15BB3CD https://wiki.openjdk.org/display/zgc/Main https://inside.java/2023/11/28/gen-zgc-explainer/\n","link":"https://gitorko.github.io/post/open-jdk/","section":"post","tags":["azul","openjdk","correto","garbage-collector","jvm","memory"],"title":"Java Virtual Machine 
Architecture"},{"body":"","link":"https://gitorko.github.io/categories/jdk/","section":"categories","tags":null,"title":"JDK"},{"body":"","link":"https://gitorko.github.io/tags/jvm/","section":"tags","tags":null,"title":"Jvm"},{"body":"","link":"https://gitorko.github.io/categories/jvm/","section":"categories","tags":null,"title":"JVM"},{"body":"","link":"https://gitorko.github.io/tags/memory/","section":"tags","tags":null,"title":"Memory"},{"body":"","link":"https://gitorko.github.io/tags/openjdk/","section":"tags","tags":null,"title":"Openjdk"},{"body":"","link":"https://gitorko.github.io/tags/jobrunr/","section":"tags","tags":null,"title":"Jobrunr"},{"body":"","link":"https://gitorko.github.io/categories/jobrunr/","section":"categories","tags":null,"title":"JobRunr"},{"body":"Spring Boot 3 integration with JobRunr\nGithub: https://github.com/gitorko/project59\nJobRunr JobRunr is a distributed job scheduler. If a service runs on many nodes the JobRunr ensure that a scheduled job is run only on a single instance. If you run a spring @Scheduled annotation then all instances will start the same job, you can use shedlock library to prevent this but this requires extra code.\nTypes of Job\nFire-Forget Delayed Recurring Job Advantages\nIt lets you schedule background jobs using lambda. The jobs can run on a distributed nodes, more node that join, the work gets distributed. It serializes the lambda as JSON and stores it in db. It also contains an automatic retry feature with an exponential back-off policy for failed jobs. There is also a built-in dashboard that allows you to monitor all jobs. It is self-maintaining, Successful jobs are automatically deleted after a configurable amount of time, so there is no need to perform manual storage cleanup. The job details are stored in db. 
Code 1package com.demo.project59.controller; 2 3import java.time.LocalDateTime; 4import java.time.format.DateTimeFormatter; 5import java.util.concurrent.atomic.AtomicInteger; 6 7import com.demo.project59.service.AppService; 8import lombok.RequiredArgsConstructor; 9import lombok.extern.slf4j.Slf4j; 10import org.jobrunr.jobs.context.JobContext; 11import org.jobrunr.scheduling.JobScheduler; 12import org.springframework.http.ResponseEntity; 13import org.springframework.web.bind.annotation.GetMapping; 14import org.springframework.web.bind.annotation.PathVariable; 15import org.springframework.web.bind.annotation.RestController; 16 17@RestController 18@Slf4j 19@RequiredArgsConstructor 20public class HomeController { 21 22 final AppService appService; 23 final JobScheduler jobScheduler; 24 DateTimeFormatter dtFormat = DateTimeFormatter.ofPattern(\u0026#34;ddMMyyyyHHmmss\u0026#34;); 25 26 @GetMapping(\u0026#34;/invoke-job\u0026#34;) 27 public ResponseEntity invokeJob() { 28 String jobTag = \u0026#34;job_\u0026#34; + dtFormat.format(LocalDateTime.now()); 29 //These will enqueue jobs, will run on a different thread 30 jobScheduler.enqueue(() -\u0026gt; { 31 appService.doJobWithTag(jobTag); 32 }); 33 jobScheduler.enqueue(() -\u0026gt; { 34 appService.doFireAndForgetWork(); 35 }); 36 jobScheduler.enqueue(() -\u0026gt; { 37 appService.doDelayedWorkWithProgressBar(JobContext.Null); 38 }); 39 log.info(\u0026#34;Job enqueued!\u0026#34;); 40 return ResponseEntity.ok().build(); 41 } 42 43 @GetMapping(\u0026#34;/direct-call\u0026#34;) 44 public ResponseEntity diretCall() { 45 appService.directCall(); 46 log.info(\u0026#34;Job submitted!\u0026#34;); 47 return ResponseEntity.ok().build(); 48 } 49 50 @GetMapping(\u0026#34;/retry-job\u0026#34;) 51 public ResponseEntity retryJob() { 52 jobScheduler.enqueue(() -\u0026gt; { 53 appService.doJobRetry(); 54 }); 55 log.info(\u0026#34;Retry Job submitted!\u0026#34;); 56 return ResponseEntity.ok().build(); 57 } 58 59 /** 60 * When there are pool 
of workers they will pick the jobs from queue and complete them. 61 * This distributes the work across many nodes. 62 */ 63 @GetMapping(\u0026#34;/many-job/{count}\u0026#34;) 64 public ResponseEntity submitManyJobs(@PathVariable Integer count) { 65 AtomicInteger counter = new AtomicInteger(); 66 for (int i = 0; i \u0026lt; count; i++) { 67 String jobTag = \u0026#34;job_\u0026#34; + counter.getAndIncrement(); 68 jobScheduler.enqueue(() -\u0026gt; { 69 appService.doJobWithTag(jobTag); 70 }); 71 } 72 log.info(\u0026#34;Many Jobs submitted!\u0026#34;); 73 return ResponseEntity.ok().build(); 74 } 75 76} 77 1package com.demo.project59.service; 2 3import java.time.Instant; 4import java.util.concurrent.TimeUnit; 5 6import com.demo.project59.domain.Customer; 7import com.demo.project59.repository.CustomerRepository; 8import lombok.RequiredArgsConstructor; 9import lombok.SneakyThrows; 10import lombok.extern.slf4j.Slf4j; 11import org.jobrunr.jobs.annotations.Job; 12import org.jobrunr.jobs.annotations.Recurring; 13import org.jobrunr.jobs.context.JobContext; 14import org.jobrunr.jobs.context.JobDashboardProgressBar; 15import org.jobrunr.scheduling.JobScheduler; 16import org.jobrunr.scheduling.cron.Cron; 17import org.springframework.stereotype.Service; 18 19@Service 20@Slf4j 21@RequiredArgsConstructor 22public class AppService { 23 24 final CustomerRepository customerRepository; 25 final JobScheduler jobScheduler; 26 27 @SneakyThrows 28 public void doDelayedWork() { 29 log.info(\u0026#34;Running doWork\u0026#34;); 30 TimeUnit.SECONDS.sleep(1); 31 log.info(\u0026#34;Completed doWork\u0026#34;); 32 } 33 34 @Job(name = \u0026#34;doJob [jobTag: %0]\u0026#34;) 35 @SneakyThrows 36 public void doJobWithTag(String jobTag) { 37 log.info(\u0026#34;Running doJob\u0026#34;); 38 TimeUnit.MINUTES.sleep(1); 39 log.info(\u0026#34;Completed doJob\u0026#34;); 40 } 41 42 /** 43 * Once the job fails it can be picked for next retry by any other server in the pool. 
44 * So don\u0026#39;t use AtomicInteger to track count 45 */ 46 @Job(name = \u0026#34;doJobRetry\u0026#34;, retries = 3) 47 @SneakyThrows 48 public void doJobRetry() { 49 log.info(\u0026#34;Running doJobRetry\u0026#34;); 50 Customer customer = customerRepository.findById(200l).orElseThrow(); 51 customer.setInvokeCount(customer.getInvokeCount() + 1); 52 customerRepository.save(customer); 53 log.info(\u0026#34;Updated customer invoke count!\u0026#34;); 54 if (customer.getInvokeCount() \u0026lt; 3) { 55 throw new RuntimeException(\u0026#34;Will not work first 2 times\u0026#34;); 56 } 57 log.info(\u0026#34;Completed doJobRetry\u0026#34;); 58 } 59 60 @Job(name = \u0026#34;doFireAndForgetWork\u0026#34;) 61 @SneakyThrows 62 public void doFireAndForgetWork() { 63 log.info(\u0026#34;Running fireForget\u0026#34;); 64 TimeUnit.MINUTES.sleep(1); 65 log.info(\u0026#34;Completed fireForget\u0026#34;); 66 } 67 68 @Job(name = \u0026#34;hourlyJob\u0026#34;) 69 @Recurring(id = \u0026#34;hourlyJob\u0026#34;, cron = \u0026#34;0 * * * *\u0026#34;, zoneId = \u0026#34;Asia/Kolkata\u0026#34;) 70 @SneakyThrows 71 public void doHourlyWork() { 72 log.info(\u0026#34;Running hourlyJob\u0026#34;); 73 TimeUnit.SECONDS.sleep(5); 74 log.info(\u0026#34;Completed hourlyJob\u0026#34;); 75 } 76 77 @Job(name = \u0026#34;doDelayedWorkWithProgressBar\u0026#34;) 78 @SneakyThrows 79 public void doDelayedWorkWithProgressBar(JobContext jobContext) { 80 JobDashboardProgressBar bar = jobContext.progressBar(100); 81 for (int i = 0; i \u0026lt; 10; i++) { 82 log.info(\u0026#34;Running longJob\u0026#34;); 83 //progress by 10% each time 84 bar.setProgress(10 * i); 85 TimeUnit.SECONDS.sleep(5); 86 log.info(\u0026#34;Completed fireForget\u0026#34;); 87 } 88 } 89 90 public void directCall() { 91 //Recurring 92 jobScheduler.scheduleRecurrently(Cron.hourly(), () -\u0026gt; { 93 doDelayedWork(); 94 }); 95 96 //Fire Forget 97 jobScheduler.enqueue(() -\u0026gt; { 98 doDelayedWork(); 99 }); 100 101 //Delay job, scheduled 
to run in future 102 jobScheduler.schedule(Instant.now().plusSeconds(30), () -\u0026gt; { 103 doDelayedWork(); 104 }); 105 } 106} 1spring: 2 main: 3 banner-mode: \u0026#34;off\u0026#34; 4 datasource: 5 driver-class-name: org.postgresql.Driver 6 url: jdbc:postgresql://localhost:5432/test-db 7 username: test 8 password: test@123 9 jpa: 10 show-sql: false 11 hibernate.ddl-auto: create 12 properties.hibernate.temp.use_jdbc_metadata_defaults: false 13 database-platform: org.hibernate.dialect.PostgreSQLDialect 14 defer-datasource-initialization: true 15 sql: 16 init: 17 mode: always 18org: 19 jobrunr: 20 job-scheduler: 21 enabled: true 22 dashboard: 23 enabled: true 24 port: 8085 25 background-job-server: 26 enabled: true 27 poll-interval-in-seconds: 5 #check for new work every 5 seconds 28 worker-count: 10 #this value normally is defined by the amount of CPU\u0026#39;s that are available 29 delete-succeeded-jobs-after: 36 #succeeded jobs will go to the deleted state after 36 hours 30 permanently-delete-deleted-jobs-after: 72 #deleted jobs will be deleted permanently after 72 hours 31 database: 32 table-prefix: \u0026#34;project59_\u0026#34; # allows to set a table prefix 33 jobs: 34 default-number-of-retries: 10 #the default number of retries for a failing job 35 retry-back-off-time-seed: 3 #the default time seed for the exponential back-off policy. 
36 metrics: 37 enabled: true #Micrometer integration Open dashboard: http://localhost:8000/dashboard/\nSetup 1# Project 59 2 3Spring Boot JobRunr 4 5[https://gitorko.github.io/spring-jobrunr/](https://gitorko.github.io/spring-jobrunr/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14``` 15 16### Postgres DB 17 18``` 19docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:14 20docker ps 21docker exec -it pg-container psql -U postgres -W postgres 22CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 23CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 24grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 25 26docker stop pg-container 27docker start pg-container 28``` 29 30### Dev 31 32To run the code 33 34```bash 35./gradlew clean build 36./gradlew bootRun 37``` 38 39Dashboard: 40 41[http://localhost:8085/dashboard](http://localhost:8085/dashboard) 42 43```bash 44curl --location --request GET \u0026#39;http://localhost:8080/invoke-job\u0026#39; 45curl --location --request GET \u0026#39;http://localhost:8080/direct-call\u0026#39; 46curl --location --request GET \u0026#39;http://localhost:8080/retry-job\u0026#39; 47 48``` 49 50Create a pool of 3 servers 51 52```bash 53java -jar build/libs/project59-1.0.0.jar --server.port=8081 --org.jobrunr.dashboard.enabled=true 54java -jar build/libs/project59-1.0.0.jar --server.port=8082 --org.jobrunr.dashboard.enabled=false 55java -jar build/libs/project59-1.0.0.jar --server.port=8083 --org.jobrunr.dashboard.enabled=false 56``` 57 58 59Submit 500 jobs that will be processed by the 3 servers 60 61```bash 62curl --location --request GET \u0026#39;http://localhost:8081/many-job/500\u0026#39; 63``` References https://www.jobrunr.io/en/ 
https://www.jobrunr.io/en/documentation/configuration/spring/\n","link":"https://gitorko.github.io/post/spring-jobrunr/","section":"post","tags":["spring","spring-boot","jobrunr","retry","work-distribution","postgres"],"title":"Spring - JobRunr"},{"body":"","link":"https://gitorko.github.io/tags/work-distribution/","section":"tags","tags":null,"title":"Work-Distribution"},{"body":"","link":"https://gitorko.github.io/tags/r2dbc/","section":"tags","tags":null,"title":"R2dbc"},{"body":"","link":"https://gitorko.github.io/categories/r2dbc/","section":"categories","tags":null,"title":"R2DBC"},{"body":"Webflux integration with reactive R2DBC with liquibase. R2DBC stands for Reactive Relational Database Connectivity, It provides a reactive driver to connect to relational database.\nGithub: https://github.com/gitorko/project65\nSpring Data R2DBC Spring Data R2DBC aims at being conceptually easy. In order to achieve this it does NOT offer caching, lazy loading, write behind or many other features of ORM frameworks. 
This makes Spring Data R2DBC a simple, limited, opinionated object mapper.\nThe following databases are supported via r2dbc libraries\nH2 (io.r2dbc:r2dbc-h2) MariaDB (org.mariadb:r2dbc-mariadb) Microsoft SQL Server (io.r2dbc:r2dbc-mssql) MySQL (dev.miku:r2dbc-mysql) jasync-sql MySQL (com.github.jasync-sql:jasync-r2dbc-mysql) Postgres (io.r2dbc:r2dbc-postgresql) Oracle (com.oracle.database.r2dbc:oracle-r2dbc) Code 1package com.demo.project65; 2 3import lombok.extern.slf4j.Slf4j; 4import org.springframework.boot.SpringApplication; 5import org.springframework.boot.autoconfigure.SpringBootApplication; 6 7@SpringBootApplication 8@Slf4j 9public class Main { 10 11 public static void main(String[] args) { 12 SpringApplication.run(Main.class, args); 13 } 14 15} 16 1package com.demo.project65.controller; 2 3import java.util.UUID; 4 5import com.demo.project65.domain.Customer; 6import com.demo.project65.service.CustomerService; 7import com.demo.project65.service.DataService; 8import lombok.AllArgsConstructor; 9import org.springframework.data.domain.Page; 10import org.springframework.data.domain.PageRequest; 11import org.springframework.http.ResponseEntity; 12import org.springframework.web.bind.annotation.DeleteMapping; 13import org.springframework.web.bind.annotation.GetMapping; 14import org.springframework.web.bind.annotation.PathVariable; 15import org.springframework.web.bind.annotation.PostMapping; 16import org.springframework.web.bind.annotation.PutMapping; 17import org.springframework.web.bind.annotation.RequestBody; 18import org.springframework.web.bind.annotation.RequestMapping; 19import org.springframework.web.bind.annotation.RequestParam; 20import org.springframework.web.bind.annotation.RestController; 21import reactor.core.publisher.Flux; 22import reactor.core.publisher.Mono; 23 24@RestController 25@AllArgsConstructor 26@RequestMapping(\u0026#34;/customer\u0026#34;) 27public class HomeController { 28 29 final CustomerService customerService; 30 final DataService 
dataService; 31 32 @GetMapping(\u0026#34;/all\u0026#34;) 33 public Flux\u0026lt;Customer\u0026gt; findAll() { 34 return customerService.findAll(); 35 } 36 37 @GetMapping(\u0026#34;/{id}\u0026#34;) 38 public Mono\u0026lt;ResponseEntity\u0026lt;Customer\u0026gt;\u0026gt; findById(@PathVariable UUID id) { 39 return customerService.findById(id) 40 .map(ResponseEntity::ok) 41 .defaultIfEmpty(ResponseEntity.notFound().build()); 42 } 43 44 @PostMapping(value = \u0026#34;/save\u0026#34;) 45 public Mono\u0026lt;Customer\u0026gt; save(@RequestBody Customer customer) { 46 return customerService.save(customer); 47 } 48 49 @PutMapping(value = \u0026#34;/update\u0026#34;) 50 public Mono\u0026lt;Customer\u0026gt; update(@RequestBody Customer customer) { 51 return customerService.update(customer); 52 } 53 54 @DeleteMapping(value = \u0026#34;/{id}\u0026#34;) 55 public Mono\u0026lt;Void\u0026gt; delete(@PathVariable UUID id) { 56 return customerService.deleteById(id); 57 } 58 59 @GetMapping(\u0026#34;/find\u0026#34;) 60 public Flux\u0026lt;Customer\u0026gt; find(@RequestParam String name, @RequestParam Integer age) { 61 return customerService.findByNameAndAge(name, age); 62 } 63 64 @GetMapping(\u0026#34;/page\u0026#34;) 65 public Mono\u0026lt;Page\u0026lt;Customer\u0026gt;\u0026gt; findPage(@RequestParam(\u0026#34;page\u0026#34;) int page, @RequestParam(\u0026#34;size\u0026#34;) int size) { 66 return customerService.findAllByPage(PageRequest.of(page, size)); 67 } 68 69 @PostMapping(\u0026#34;/search\u0026#34;) 70 public Flux\u0026lt;Customer\u0026gt; search(@RequestBody Customer customer) { 71 return customerService.search(customer); 72 } 73 74 @PostMapping(\u0026#34;/findOne\u0026#34;) 75 public Mono\u0026lt;Customer\u0026gt; findOne(@RequestBody Customer customer) { 76 return customerService.findOne(customer); 77 } 78} 1package com.demo.project65.config; 2 3import org.springframework.context.annotation.Bean; 4import org.springframework.context.annotation.Configuration; 5import 
org.springframework.data.domain.ReactiveAuditorAware; 6import org.springframework.data.r2dbc.config.EnableR2dbcAuditing; 7import org.springframework.data.r2dbc.repository.config.EnableR2dbcRepositories; 8import reactor.core.publisher.Mono; 9 10@Configuration 11@EnableR2dbcAuditing 12@EnableR2dbcRepositories 13public class DbConfig { 14 15 @Bean 16 public ReactiveAuditorAware\u0026lt;String\u0026gt; auditorAware() { 17 return () -\u0026gt; Mono.just(\u0026#34;admin\u0026#34;); 18 } 19} 1package com.demo.project65.repository; 2 3import java.util.UUID; 4 5import com.demo.project65.domain.Customer; 6import org.springframework.data.domain.Pageable; 7import org.springframework.data.r2dbc.repository.Query; 8import org.springframework.data.r2dbc.repository.R2dbcRepository; 9import reactor.core.publisher.Flux; 10 11public interface CustomerRepository extends R2dbcRepository\u0026lt;Customer, UUID\u0026gt; { 12 13 @Query(\u0026#34;select * from customer e where e.name = $1 and e.age = $2\u0026#34;) 14 Flux\u0026lt;Customer\u0026gt; findByNameAndAge(String name, Integer age); 15 16 Flux\u0026lt;Customer\u0026gt; findAllBy(Pageable pageable); 17 18} 1spring: 2 r2dbc: 3 url: r2dbc:postgresql://localhost:5432/test-db 4 username: test 5 password: test@123 6 pool: 7 enabled: true 8 initial-size: 10 9 max-size: 30 10 liquibase: 11 enabled: true 12 change-log: db/changelog/db.changelog-main.yaml 13 url: jdbc:postgresql://localhost:5432/test-db 14 user: test 15 password: test@123 16 17logging: 18 level: 19 org.springframework.r2dbc: DEBUG Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project 65 2 3Spring Webflux \u0026amp; R2DBC 4 5[https://gitorko.github.io/spring-webflux-r2dbc/](https://gitorko.github.io/spring-webflux-r2dbc/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14``` 15 16### Postgres DB 17 18```bash 19docker run -p 5432:5432 --name pg-container -e 
POSTGRES_PASSWORD=password -d postgres:14 20docker ps 21docker exec -it pg-container psql -U postgres -W postgres 22CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 23CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 24GRANT ALL PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 25 26docker stop pg-container 27docker start pg-container 28``` 29 30Ensure you login with test user and create the table. 31 32```bash 33docker exec -it pg-container psql -U test -W test-db 34\\dt 35``` 36 37Create the table 38 39```sql 40CREATE TABLE customer ( 41 id SERIAL PRIMARY KEY, 42 name VARCHAR(50) NOT NULL, 43 age INT NOT NULL 44); 45``` 46 47### Dev 48 49To run the code. 50 51```bash 52./gradlew clean build 53./gradlew bootRun 54``` Errors If you encounter any of the error mentioned below it could probably be because the data type in postgres cant be mapped by r2dbc. Eg: CHAR is not supported, changing to VARCHAR will fix the issue.\n1org.springframework.data.mapping.MappingException: Could not read property public java.lang.String com.demo.project65.Customer.name from result set! 
2org.springframework.data.r2dbc.function.convert.EntityRowMapper.readFrom(EntityRowMapper.java:103) ~[spring-data-r2dbc-1.0.0.M1.jar:1.0.0.M1] 3Caused by: java.lang.IllegalArgumentException: Cannot decode value of type java.lang.Object 4org.springframework.data.r2dbc.function.convert.EntityRowMapper.readFrom(EntityRowMapper.java:99) ~[spring-data-r2dbc-1.0.0.M1.jar:1.0.0.M1] If you encounter unit test failures because of r2dbc repository in @SpringBootTest then exclude the classes\n1@EnableAutoConfiguration(exclude = {R2dbcAutoConfiguration.class, LiquibaseAutoConfiguration.class}) References https://spring.io/projects/spring-data-r2dbc\n","link":"https://gitorko.github.io/post/spring-webflux-r2dbc/","section":"post","tags":["r2dbc","webflux","auditing","liquibase","test-container"],"title":"Spring Webflux \u0026 R2DBC"},{"body":"","link":"https://gitorko.github.io/tags/test-container/","section":"tags","tags":null,"title":"Test-Container"},{"body":"","link":"https://gitorko.github.io/tags/deeplearning4j/","section":"tags","tags":null,"title":"Deeplearning4j"},{"body":"Create a neural network with deeplearning4j library.\nMachine Learning Given a labelled data set, a machine learning algorithm will determine mathematical function based on the relationship between label and data, this can later be used to predict the label for any new input data. Neural networks are computational models that consist of interconnected layers of nodes. 
A neural network can derive information from new data, even if it has not seen these particular data items before.\nIris Flower classification problem states that given measurements of a flower, we should be able to predict which type of flower it is.\nFlower classification\nClass Number (Type) Class 0 Iris Setosa 1 Iris Versicolour 2 Iris Virginica Input Data\nSepal Length Sepal Width Petal Length Petal Width Class (Type) 5.1 3.5 1.4 0.2 0 (Iris Setosa) Training Step - First we train our model by using an existing data set of flower measurements.\nLoad data Normalize data Split data set to training and test data Configure model - Creates neural network Train model Evaluate Model Export Model Prediction Step - Using the model we created above we predict which flower type an input belongs to.\nLoad Model Format Data Normal Data Feed Data Get Label Code 1package com.demo.neural; 2 3import java.io.File; 4import java.io.IOException; 5import java.util.Arrays; 6import java.util.List; 7 8import lombok.extern.slf4j.Slf4j; 9import org.datavec.api.records.reader.RecordReader; 10import org.datavec.api.records.reader.impl.csv.CSVRecordReader; 11import org.datavec.api.split.FileSplit; 12import org.deeplearning4j.datasets.datavec.RecordReaderDataSetIterator; 13import org.deeplearning4j.nn.conf.MultiLayerConfiguration; 14import org.deeplearning4j.nn.conf.NeuralNetConfiguration; 15import org.deeplearning4j.nn.conf.layers.DenseLayer; 16import org.deeplearning4j.nn.conf.layers.OutputLayer; 17import org.deeplearning4j.nn.multilayer.MultiLayerNetwork; 18import org.deeplearning4j.nn.weights.WeightInit; 19import org.deeplearning4j.optimize.listeners.ScoreIterationListener; 20import org.nd4j.evaluation.classification.Evaluation; 21import org.nd4j.linalg.activations.Activation; 22import org.nd4j.linalg.api.buffer.DataBuffer; 23import org.nd4j.linalg.api.ndarray.INDArray; 24import org.nd4j.linalg.cpu.nativecpu.NDArray; 25import org.nd4j.linalg.cpu.nativecpu.buffer.FloatBuffer; 26import 
org.nd4j.linalg.dataset.SplitTestAndTrain; 27import org.nd4j.linalg.dataset.api.DataSet; 28import org.nd4j.linalg.dataset.api.iterator.DataSetIterator; 29import org.nd4j.linalg.dataset.api.preprocessor.DataNormalization; 30import org.nd4j.linalg.dataset.api.preprocessor.NormalizerStandardize; 31import org.nd4j.linalg.dataset.api.preprocessor.serializer.NormalizerSerializer; 32import org.nd4j.linalg.learning.config.Sgd; 33import org.nd4j.linalg.lossfunctions.LossFunctions; 34import org.springframework.boot.CommandLineRunner; 35import org.springframework.boot.SpringApplication; 36import org.springframework.boot.autoconfigure.SpringBootApplication; 37import org.springframework.context.annotation.Bean; 38 39@Slf4j 40@SpringBootApplication 41public class Main { 42 43 //5 values in each row of the iris.txt CSV: 4 input features followed by an integer label (class) index. Labels are the 5th value (index 4) in each row 44 final int LABEL_INDEX = 4; 45 //3 classes (types of iris flowers) in the iris data set. Classes have integer values 46 // 0, 1 or 2 47 final int NUM_CLASS = 3; 48 //Iris data set: 150 examples total. 
We are loading all of them into one DataSet 49 // (not recommended for large data sets) 50 final int BATCH_SIZE = 150; 51 52 final List\u0026lt;String\u0026gt; flowerType = Arrays.asList(\u0026#34;Iris Setosa\u0026#34;, \u0026#34;Iris Versicolour\u0026#34;, \u0026#34;Iris Virginica\u0026#34;); 53 54 public static void main(String[] args) { 55 SpringApplication.run(Main.class, args); 56 } 57 58 @Bean 59 public CommandLineRunner sendData() { 60 return args -\u0026gt; { 61 generateModel(); 62 log.info(\u0026#34;Flower type is {}\u0026#34;, predictForInput(new float[]{5.1f, 3.5f, 1.4f, 0.2f})); 63 log.info(\u0026#34;Flower type is {}\u0026#34;, predictForInput(new float[]{6.5f, 3.0f, 5.5f, 1.8f})); 64 }; 65 } 66 67 private DataSet loadDataSet() throws IOException, InterruptedException { 68 int numLinesToSkip = 0; 69 char delimiter = \u0026#39;,\u0026#39;; 70 RecordReader recordReader = new CSVRecordReader(numLinesToSkip, delimiter); 71 recordReader.initialize(new FileSplit(new File(\u0026#34;src/main/resources/iris.txt\u0026#34;))); 72 73 //RecordReaderDataSetIterator handles conversion to DataSet objects, ready for use in neural network 74 DataSetIterator iterator = new RecordReaderDataSetIterator(recordReader, BATCH_SIZE, LABEL_INDEX, NUM_CLASS); 75 DataSet allData = iterator.next(); 76 allData.shuffle(); 77 return allData; 78 } 79 80 private void generateModel() throws IOException, InterruptedException { 81 DataSet allData = loadDataSet(); 82 //Use 65% of data for training 83 SplitTestAndTrain testAndTrain = allData.splitTestAndTrain(0.65); 84 85 DataSet trainingData = testAndTrain.getTrain(); 86 DataSet testData = testAndTrain.getTest(); 87 88 //We need to normalize our data. We\u0026#39;ll use NormalizeStandardize (which gives us mean 0, unit variance): 89 DataNormalization normalizer = new NormalizerStandardize(); 90 //Collect the statistics (mean/stdev) from the training data. 
This does not modify the input data 91 normalizer.fit(trainingData); 92 //Apply normalization to the training data 93 normalizer.transform(trainingData); 94 //Apply normalization to the test data. This is using statistics calculated from the *training* set 95 normalizer.transform(testData); 96 97 final int numInputs = 4; 98 int outputNum = 3; 99 long seed = 6; 100 101 log.info(\u0026#34;Build model....\u0026#34;); 102 MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder() 103 .seed(seed) 104 .activation(Activation.TANH) 105 .weightInit(WeightInit.XAVIER) 106 .updater(new Sgd(0.1)) 107 .l2(1e-4) 108 .list() 109 .layer(new DenseLayer.Builder().nIn(numInputs).nOut(3) 110 .build()) 111 .layer(new DenseLayer.Builder().nIn(3).nOut(3) 112 .build()) 113 .layer(new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) 114 .activation(Activation.SOFTMAX) //Override the global TANH activation with softmax for this layer 115 .nIn(3).nOut(outputNum).build()) 116 .build(); 117 118 //run the model 119 MultiLayerNetwork model = new MultiLayerNetwork(conf); 120 model.init(); 121 //record score once every 100 iterations 122 model.setListeners(new ScoreIterationListener(100)); 123 124 for (int i = 0; i \u0026lt; 1000; i++) { 125 model.fit(trainingData); 126 } 127 128 //evaluate the model on the test set 129 Evaluation eval = new Evaluation(3); 130 INDArray output = model.output(testData.getFeatures()); 131 eval.eval(testData.getLabels(), output); 132 log.info(eval.stats()); 133 saveModelAndNormalizer(model, normalizer); 134 135 } 136 137 private void saveModelAndNormalizer(MultiLayerNetwork model, DataNormalization normalizer) throws IOException { 138 log.info(\u0026#34;Saving model \u0026amp; normalizer!\u0026#34;); 139 File modelFile = new File(\u0026#34;model.file\u0026#34;); 140 model.save(modelFile, false); 141 142 File normalizerFile = new File(\u0026#34;normalize.file\u0026#34;); 143 NormalizerSerializer.getDefault().write(normalizer, 
normalizerFile); 144 } 145 146 private String predictForInput(float[] input) throws Exception { 147 log.info(\u0026#34;Loading model \u0026amp; normalizer!\u0026#34;); 148 File modelFile = new File(\u0026#34;model.file\u0026#34;); 149 MultiLayerNetwork model = MultiLayerNetwork.load(modelFile, false); 150 File normalizerFile = new File(\u0026#34;normalize.file\u0026#34;); 151 DataNormalization normalizer = NormalizerSerializer.getDefault().restore(normalizerFile); 152 153 DataBuffer dataBuffer = new FloatBuffer(input); 154 NDArray ndArray = new NDArray(1, 4); 155 ndArray.setData(dataBuffer); 156 157 normalizer.transform(ndArray); 158 INDArray result = model.output(ndArray, false); 159 getBestPredicationIndex(result); 160 161 return flowerType.get(getBestPredicationIndex(result)); 162 } 163 164 private int getBestPredicationIndex(INDArray predictions) { 165 int maxIndex = 0; 166 for (int i = 0; i \u0026lt; 3; i++) { 167 if (predictions.getFloat(i) \u0026gt; predictions.getFloat(maxIndex)) { 168 maxIndex = i; 169 } 170 } 171 return maxIndex; 172 } 173 174} Setup 1# Project102 2 3deeplearning4j - Supervised classification (Neural Networks) References https://deeplearning4j.konduit.ai/\n","link":"https://gitorko.github.io/post/machine-learning-deeplearning4j/","section":"post","tags":["machine-learning","neural-networks","deeplearning4j"],"title":"Machine Learning - deeplearning4j"},{"body":"","link":"https://gitorko.github.io/tags/machine-learning/","section":"tags","tags":null,"title":"Machine-Learning"},{"body":"","link":"https://gitorko.github.io/categories/machine-learning/","section":"categories","tags":null,"title":"Machine-Learning"},{"body":"","link":"https://gitorko.github.io/tags/neural-networks/","section":"tags","tags":null,"title":"Neural-Networks"},{"body":"Spring boot application with distributed locking using postgres\nGithub: https://github.com/gitorko/project05\nDistributed Locking When there are many service running and need to acquire a lock to run 
a critical region of the code there is contention.\nUse OptimisticLocking to avoid 2 threads from acquiring the same lock Use UNIQUE constraint to ensure same lock is present only once in the db. Even if the server crashes/dies the locks should be auto released and not held forever or should not require manual intervention for cleanup. Use virtual threads cleanup locks after duration is completed. Only the node/server that acquired the lock can release the lock. Postgres is not a distributed database, here the services that run are distributed, the services require a lock which is provided by postgres.\nCode 1package com.demo.project05.service; 2 3import java.net.InetAddress; 4import java.time.LocalDateTime; 5import java.util.Optional; 6 7import com.demo.project05.domain.DistributedLock; 8import com.demo.project05.repository.DistributedLockRepository; 9import lombok.RequiredArgsConstructor; 10import lombok.SneakyThrows; 11import lombok.extern.slf4j.Slf4j; 12import org.springframework.stereotype.Service; 13import org.springframework.transaction.annotation.Propagation; 14import org.springframework.transaction.annotation.Transactional; 15 16/** 17 * Since @Transaction needs public scope we dont want to expose this class to other classes, hence it\u0026#39;s an internal class. 
18 */ 19@Service 20@RequiredArgsConstructor 21@Slf4j 22public class InternalLockService { 23 24 final DistributedLockRepository repository; 25 final String lockedBy = getHostIdentifier(); 26 27 @Transactional(propagation = Propagation.REQUIRES_NEW) 28 public boolean tryLock(String lockName, int expirySeconds) { 29 LocalDateTime lockUntil = LocalDateTime.now().plusSeconds(expirySeconds); 30 LocalDateTime now = LocalDateTime.now(); 31 Optional\u0026lt;DistributedLock\u0026gt; lockOptional = repository.findUnlocked(lockName, now); 32 if (lockOptional.isPresent()) { 33 DistributedLock lock = lockOptional.get(); 34 lock.setLockUntil(lockUntil); 35 lock.setLockAt(now); 36 lock.setLockBy(lockedBy); 37 repository.save(lock); 38 } else { 39 DistributedLock newLock = new DistributedLock(); 40 newLock.setLockName(lockName); 41 newLock.setLockUntil(lockUntil); 42 newLock.setLockAt(now); 43 newLock.setLockBy(lockedBy); 44 repository.save(newLock); 45 } 46 return true; 47 } 48 49 @Transactional(propagation = Propagation.REQUIRES_NEW) 50 public boolean unlock(String lockName) { 51 Optional\u0026lt;DistributedLock\u0026gt; lockOptional = repository.findByLockName(lockName); 52 if (lockOptional.isPresent()) { 53 DistributedLock lock = lockOptional.get(); 54 //Only the node that locked will be able to unlock 55 if (lockedBy.equals(lock.getLockBy())) { 56 lock.setLockUntil(null); 57 lock.setLockAt(null); 58 lock.setLockBy(null); 59 repository.save(lock); 60 } 61 return true; 62 } 63 return false; 64 } 65 66 @SneakyThrows 67 private String getHostIdentifier() { 68 // Get unique identifier for this instance, e.g., hostname or UUID 69 return InetAddress.getLocalHost().getHostName(); 70 } 71} 1package com.demo.project05.service; 2 3import java.util.concurrent.TimeUnit; 4 5import lombok.RequiredArgsConstructor; 6import lombok.extern.slf4j.Slf4j; 7import org.springframework.dao.DataIntegrityViolationException; 8import org.springframework.orm.ObjectOptimisticLockingFailureException; 9import 
org.springframework.stereotype.Service; 10 11@Service 12@Slf4j 13@RequiredArgsConstructor 14public class DistributedLockService { 15 16 final InternalLockService internalLockService; 17 18 public boolean acquireLock(String lockName, int expirySeconds) { 19 log.info(\u0026#34;Attempting to acquire lock: {}\u0026#34;, lockName); 20 try { 21 boolean lockStatus = internalLockService.tryLock(lockName, expirySeconds); 22 scheduleUnlock(lockName, expirySeconds); 23 return lockStatus; 24 } catch (ObjectOptimisticLockingFailureException ex) { 25 log.error(\u0026#34;Unable to acquire lock due to concurrent request\u0026#34;); 26 return false; 27 } catch (DataIntegrityViolationException ex) { 28 log.error(\u0026#34;Lock already exists\u0026#34;); 29 return false; 30 } catch (Exception ex) { 31 log.error(\u0026#34;Failed to acquire lock\u0026#34;); 32 return false; 33 } 34 } 35 36 public boolean releaseLock(String lockName) { 37 try { 38 log.info(\u0026#34;Attempting to release lock: {}\u0026#34;, lockName); 39 return internalLockService.unlock(lockName); 40 } catch (Exception ex) { 41 log.error(\u0026#34;Failed to release lock\u0026#34;); 42 return false; 43 } 44 } 45 46 /** 47 * Auto cleanup job. 48 * Code will work even if this fails. But this will set the lock to null to make it more clear that it is unused. 
49 * Even if server dies the lock will be released based on lock_until time 50 */ 51 private void scheduleUnlock(String lockName, int expirySeconds) { 52 Thread.startVirtualThread(() -\u0026gt; { 53 try { 54 TimeUnit.SECONDS.sleep(expirySeconds); 55 releaseLock(lockName); 56 } catch (InterruptedException e) { 57 Thread.currentThread().interrupt(); 58 } 59 }); 60 } 61} 1package com.demo.project05.controller; 2 3import com.demo.project05.service.DistributedLockService; 4import lombok.RequiredArgsConstructor; 5import lombok.extern.slf4j.Slf4j; 6import org.springframework.web.bind.annotation.GetMapping; 7import org.springframework.web.bind.annotation.RequestParam; 8import org.springframework.web.bind.annotation.RestController; 9 10@RestController 11@RequiredArgsConstructor 12@Slf4j 13public class LockController { 14 15 final DistributedLockService distributedLockService; 16 17 @GetMapping(\u0026#34;/try-lock\u0026#34;) 18 public String tryLock(@RequestParam String lockName, @RequestParam int expirySeconds) { 19 boolean lockAcquired = distributedLockService.acquireLock(lockName, expirySeconds); 20 return lockAcquired ? 
\u0026#34;Lock acquired!\u0026#34; : \u0026#34;Failed to acquire lock!\u0026#34;; 21 } 22 23 @GetMapping(\u0026#34;/unlock\u0026#34;) 24 public String unlock(@RequestParam String lockName) { 25 distributedLockService.releaseLock(lockName); 26 return \u0026#34;Lock released!\u0026#34;; 27 } 28} 1CREATE TABLE distributed_lock 2( 3 lock_id BIGSERIAL PRIMARY KEY, 4 lock_name VARCHAR(255) UNIQUE, 5 lock_until TIMESTAMP, 6 lock_at TIMESTAMP, 7 lock_by VARCHAR(255), 8 lock_version BIGINT 9); Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project 05 2 3Distributed Locking - Postgres 4 5[https://gitorko.github.io/post/distributed-locking-postgres](https://gitorko.github.io/post/distributed-locking-postgres) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 21.0.3 2024-04-16 LTS 14``` 15 16### Postgres DB 17 18```bash 19docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:14 20docker ps 21docker exec -it pg-container psql -U postgres -W postgres 22CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 23CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 24grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 25 26docker stop pg-container 27docker start pg-container 28``` 29 30### Dev 31 32To run the backend in dev mode. 
33 34```bash 35./gradlew clean build 36./gradlew bootRun 37 38``` 39 References https://ignite.apache.org/\n","link":"https://gitorko.github.io/post/distributed-locking-postgres/","section":"post","tags":["distributed-lock","postgres","liquibase"],"title":"Distributed Locking - Postgres"},{"body":"","link":"https://gitorko.github.io/tags/reactive-jdbc/","section":"tags","tags":null,"title":"Reactive-Jdbc"},{"body":"Webflux integration with reactive JDBC, to allow non-blocking calls to database.\nGithub: https://github.com/gitorko/project64\nWebflux JDBC This approach provides alternate way to integrate existing relational database with webflux if the project is not ready to use R2DBC.\nCode 1package com.demo.project64; 2 3import java.time.Duration; 4 5import com.demo.project64.domain.Customer; 6import com.demo.project64.repositoryservice.CustomerReactiveRepoService; 7import lombok.extern.slf4j.Slf4j; 8import org.springframework.boot.CommandLineRunner; 9import org.springframework.boot.SpringApplication; 10import org.springframework.boot.autoconfigure.SpringBootApplication; 11import org.springframework.context.annotation.Bean; 12import reactor.core.publisher.Flux; 13 14@SpringBootApplication 15@Slf4j 16public class Main { 17 18 public static void main(String[] args) { 19 SpringApplication.run(Main.class, args); 20 } 21 22 @Bean 23 public CommandLineRunner seedData(CustomerReactiveRepoService customerReactiveService) { 24 return args -\u0026gt; { 25 log.info(\u0026#34;Seeding data!\u0026#34;); 26 27 Flux\u0026lt;String\u0026gt; names = Flux.just(\u0026#34;raj\u0026#34;, \u0026#34;david\u0026#34;, \u0026#34;pam\u0026#34;).delayElements(Duration.ofSeconds(1)); 28 Flux\u0026lt;Integer\u0026gt; ages = Flux.just(25, 27, 30).delayElements(Duration.ofSeconds(1)); 29 Flux\u0026lt;Customer\u0026gt; customers = Flux.zip(names, ages).map(tupple -\u0026gt; { 30 return new Customer(null, tupple.getT1(), tupple.getT2()); 31 }); 32 33 
customerReactiveService.deleteAll().thenMany(customers.flatMap(c -\u0026gt; customerReactiveService.save(c)) 34 .thenMany(customerReactiveService.findAll())).subscribe(System.out::println); 35 }; 36 } 37 38} 39 1package com.demo.project64.repositoryservice; 2 3import java.util.Optional; 4import java.util.concurrent.Callable; 5 6import org.springframework.beans.factory.annotation.Autowired; 7import org.springframework.beans.factory.annotation.Qualifier; 8import org.springframework.data.domain.Page; 9import org.springframework.data.domain.Pageable; 10import org.springframework.data.jpa.repository.JpaRepository; 11import reactor.core.publisher.Flux; 12import reactor.core.publisher.Mono; 13import reactor.core.scheduler.Scheduler; 14import reactor.core.scheduler.Schedulers; 15 16public abstract class AbstractReactiveRepoService\u0026lt;T\u0026gt; { 17 18 @Qualifier(\u0026#34;jdbcScheduler\u0026#34;) 19 @Autowired 20 Scheduler jdbcScheduler; 21 22 public Mono\u0026lt;Page\u0026lt;T\u0026gt;\u0026gt; findAll(Pageable pageable) { 23 return asyncCallable(() -\u0026gt; getRepository().findAll(pageable)); 24 } 25 26 public Flux\u0026lt;T\u0026gt; findAll() { 27 return asyncIterable(() -\u0026gt; getRepository().findAll().iterator()); 28 } 29 30 public Mono\u0026lt;Optional\u0026lt;T\u0026gt;\u0026gt; findById(Long id) { 31 return asyncCallable(() -\u0026gt; getRepository().findById(id)); 32 } 33 34 public Mono\u0026lt;T\u0026gt; save(T customer) { 35 return (Mono\u0026lt;T\u0026gt;) asyncCallable(() -\u0026gt; getRepository().save(customer)); 36 } 37 38 public Mono\u0026lt;Void\u0026gt; delete(T object) { 39 return asyncCallable(() -\u0026gt; { 40 getRepository().delete(object); 41 return null; 42 }); 43 } 44 45 public Mono\u0026lt;Void\u0026gt; deleteAll() { 46 return asyncCallable(() -\u0026gt; { 47 getRepository().deleteAll(); 48 return null; 49 }); 50 } 51 52 protected \u0026lt;T\u0026gt; Mono\u0026lt;T\u0026gt; asyncCallable(Callable\u0026lt;T\u0026gt; callable) { 53 
return Mono.fromCallable(callable).subscribeOn(Schedulers.newParallel(\u0026#34;jdbc-thread\u0026#34;)).publishOn(jdbcScheduler); 54 } 55 56 protected \u0026lt;T\u0026gt; Flux\u0026lt;T\u0026gt; asyncIterable(Iterable\u0026lt;T\u0026gt; iterable) { 57 return Flux.fromIterable(iterable).subscribeOn(Schedulers.newParallel(\u0026#34;jdbc-thread\u0026#34;)).publishOn(jdbcScheduler); 58 } 59 60 protected abstract JpaRepository getRepository(); 61 62} 1package com.demo.project64.repositoryservice; 2 3import java.util.List; 4 5import com.demo.project64.domain.Customer; 6import com.demo.project64.repository.CustomerRepository; 7import lombok.RequiredArgsConstructor; 8import lombok.extern.slf4j.Slf4j; 9import org.springframework.data.jpa.repository.JpaRepository; 10import org.springframework.stereotype.Service; 11import reactor.core.publisher.Mono; 12 13@Service 14@RequiredArgsConstructor 15@Slf4j 16public class CustomerReactiveRepoService extends AbstractReactiveRepoService\u0026lt;Customer\u0026gt; { 17 18 private final CustomerRepository customerRepository; 19 20 @Override 21 protected JpaRepository getRepository() { 22 return customerRepository; 23 } 24 25 public Mono\u0026lt;List\u0026lt;Customer\u0026gt;\u0026gt; findByNameAndAge(String name, Integer age) { 26 return asyncCallable(() -\u0026gt; customerRepository.findByNameAndAge(name, age)); 27 } 28 29} 30 31 1package com.demo.project64.repository; 2 3import java.util.List; 4 5import com.demo.project64.domain.Customer; 6import org.springframework.data.jpa.repository.JpaRepository; 7 8public interface CustomerRepository extends JpaRepository\u0026lt;Customer, Long\u0026gt; { 9 List\u0026lt;Customer\u0026gt; findByNameAndAge(String name, Integer age); 10} 1package com.demo.project64.controller; 2 3import java.util.List; 4import java.util.Optional; 5 6import com.demo.project64.domain.Customer; 7import com.demo.project64.domain.DownloadFile; 8import com.demo.project64.service.CsvService; 9import 
com.demo.project64.repositoryservice.CustomerReactiveRepoService; 10import com.demo.project64.repositoryservice.DownloadFileReactiveRepoService; 11import lombok.RequiredArgsConstructor; 12import org.springframework.core.io.FileSystemResource; 13import org.springframework.core.io.Resource; 14import org.springframework.http.CacheControl; 15import org.springframework.http.HttpHeaders; 16import org.springframework.http.MediaType; 17import org.springframework.http.ResponseEntity; 18import org.springframework.web.bind.annotation.GetMapping; 19import org.springframework.web.bind.annotation.PathVariable; 20import org.springframework.web.bind.annotation.PostMapping; 21import org.springframework.web.bind.annotation.RequestBody; 22import org.springframework.web.bind.annotation.RequestMapping; 23import org.springframework.web.bind.annotation.RequestParam; 24import org.springframework.web.bind.annotation.RestController; 25import reactor.core.publisher.Flux; 26import reactor.core.publisher.Mono; 27 28@RestController 29@RequestMapping(\u0026#34;/api\u0026#34;) 30@RequiredArgsConstructor 31public class HomeController { 32 33 private final CustomerReactiveRepoService customerReactiveService; 34 private final CsvService csvService; 35 private final DownloadFileReactiveRepoService downloadFileReactiveRepoService; 36 37 @GetMapping(\u0026#34;/customers\u0026#34;) 38 public Flux\u0026lt;Customer\u0026gt; getAllCustomer() { 39 return customerReactiveService.findAll(); 40 } 41 42 @GetMapping(\u0026#34;/customer/{customerId}\u0026#34;) 43 public Mono\u0026lt;Optional\u0026lt;Customer\u0026gt;\u0026gt; findById(@PathVariable Long customerId) { 44 return customerReactiveService.findById(customerId); 45 } 46 47 @PostMapping(value = \u0026#34;/customer\u0026#34;) 48 public Mono\u0026lt;Customer\u0026gt; save(@RequestBody Customer customer) { 49 return customerReactiveService.save(customer); 50 } 51 52 @GetMapping(\u0026#34;/customer\u0026#34;) 53 public 
Mono\u0026lt;List\u0026lt;Customer\u0026gt;\u0026gt; findById(@RequestParam String name, @RequestParam Integer age) { 54 return customerReactiveService.findByNameAndAge(name, age); 55 } 56 57 @GetMapping(\u0026#34;/csv\u0026#34;) 58 public Mono\u0026lt;DownloadFile\u0026gt; generateCsvFile() { 59 return csvService.generateCsvFile(); 60 } 61 62 @GetMapping(\u0026#34;/downloads\u0026#34;) 63 public Flux\u0026lt;DownloadFile\u0026gt; findAllDownloads() { 64 return downloadFileReactiveRepoService.findAll(); 65 } 66 67 @GetMapping(path = \u0026#34;/download/{id}\u0026#34;, produces = MediaType.APPLICATION_OCTET_STREAM_VALUE) 68 Mono\u0026lt;ResponseEntity\u0026lt;Resource\u0026gt;\u0026gt; downloadCsvFile(@PathVariable(\u0026#34;id\u0026#34;) Long id) { 69 return csvService.getFileResourcePath(id) 70 .filter(response -\u0026gt; csvService.isFileExists(response)) 71 .flatMap(s -\u0026gt; { 72 Resource resource = new FileSystemResource(s); 73 return Mono.just(ResponseEntity.ok() 74 .cacheControl(CacheControl.noCache()) 75 .header(HttpHeaders.CONTENT_DISPOSITION, \u0026#34;attachment; filename=\u0026#34; + resource.getFilename()) 76 .header(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_OCTET_STREAM_VALUE) 77 .body(resource)); 78 79 }).defaultIfEmpty(ResponseEntity.notFound().build()); 80 } 81} 1package com.demo.project64.config; 2 3import java.util.concurrent.Executors; 4 5import org.springframework.beans.factory.annotation.Value; 6import org.springframework.context.annotation.Bean; 7import org.springframework.context.annotation.Configuration; 8import org.springframework.transaction.PlatformTransactionManager; 9import org.springframework.transaction.support.TransactionTemplate; 10import reactor.core.scheduler.Scheduler; 11import reactor.core.scheduler.Schedulers; 12 13@Configuration 14class SchedulerConfig { 15 16 @Value(\u0026#34;${spring.datasource.hikari.maximum-pool-size:100}\u0026#34;) 17 private int connectionPoolSize; 18 19 @Bean 20 public Scheduler 
jdbcScheduler() { 21 return Schedulers.fromExecutor(Executors.newFixedThreadPool(connectionPoolSize)); 22 } 23 24} 1spring: 2 main: 3 banner-mode: \u0026#34;off\u0026#34; 4 datasource: 5 driver-class-name: org.postgresql.Driver 6 host: localhost 7 url: jdbc:postgresql://${spring.datasource.host}:5432/test-db 8 username: test 9 password: test@123 10 jpa: 11 show-sql: false 12 hibernate.ddl-auto: create-drop 13 properties.hibernate.temp.use_jdbc_metadata_defaults: false 14 database-platform: org.hibernate.dialect.PostgreSQLDialect 15 defer-datasource-initialization: true Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project 64 2 3Spring WebFlux Reactive JDBC 4 5[https://gitorko.github.io/spring-webflux-reactive-jdbc/](https://gitorko.github.io/spring-webflux-reactive-jdbc/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14``` 15 16### Postgres DB 17 18``` 19docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:9.6.10 20docker ps 21docker exec -it pg-container psql -U postgres -W postgres 22CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 23CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 24grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 25 26docker stop pg-container 27docker start pg-container 28``` 29 30To seed large test data 31 32``` 33INSERT INTO customer (name, age) 34SELECT 35 \u0026#39;John\u0026#39;, 36 30 37FROM generate_series(1, 4000000); 38``` 39 40### Dev 41 42To run the code. 
43 44```bash 45./gradlew clean build 46./gradlew bootRun 47``` References https://spring.io/blog/2018/12/07/reactive-programming-and-relational-databases\n","link":"https://gitorko.github.io/post/spring-webflux-reactive-jdbc/","section":"post","tags":["reactive-jdbc","webflux"],"title":"Spring Webflux \u0026 Reactive JDBC"},{"body":"Message Queue implementation using PostgreSQL\nGithub: https://github.com/gitorko/project81\nMessage Queue PostgreSQL can be used a messaging queue, it also offers features like LISTEN/NOTIFY which make it a suitable to support message queue.\nAdvantages\nReuse existing infrastructure - Use an existing database keeping the tech stack simple. Low messages throughput - Not every system needs high volume of messages to process per second. Persistent Store - You can query the db to check the messages if they are processed and manually trigger re-queue. This command notifies the channel of a new message in the queue\n1NOTIFY new_task_channel, \u0026#39;New task added\u0026#39;; This command listens for these notifications\n1LISTEN new_task_channel; You also need to lock the row being read to avoid the same row from being updated by 2 different transactions\nselect * from table FOR SHARE - This clause locks the selected rows for read, other threads can read but cant modify. select * from table FOR UPDATE - This clause locks the selected rows for update. This prevents other transactions from reading/modifying these rows until the current transaction is completed (committed or rolled back) select * from table FOR UPDATE SKIP LOCKED clause - This clause tells the database to skip rows that are already locked by another transaction. Instead of waiting for the lock to be released\nselect * from table FOR NO KEY SHARE - Use this when you want to ensure that no other transaction can obtain locks that would conflict with your current transaction’s updates, but you do not need to prevent other transactions from acquiring FOR SHARE locks. 
select * from table FOR NO KEY UPDATE - Use this when you need to prevent all types of locks that could conflict with your updates, providing a more restrictive lock compared to FOR NO KEY SHARE\nDisadvantages\nMissing notifications if a worker is disconnected. Row-level locking is needed to prevent multiple workers from picking up the same message. Code 1package com.demo.project81.config; 2 3import com.demo.project81.service.NotificationHandler; 4import com.demo.project81.service.NotifierService; 5import lombok.RequiredArgsConstructor; 6import lombok.extern.slf4j.Slf4j; 7import org.springframework.boot.CommandLineRunner; 8import org.springframework.context.annotation.Bean; 9import org.springframework.context.annotation.Configuration; 10 11@Configuration 12@RequiredArgsConstructor 13@Slf4j 14public class ListenerConfiguration { 15 16 @Bean 17 CommandLineRunner startListener(NotifierService notifier, NotificationHandler handler) { 18 return (args) -\u0026gt; { 19 log.info(\u0026#34;Starting task listener thread...\u0026#34;); 20 Thread.ofVirtual().name(\u0026#34;task-listener\u0026#34;).start(notifier.createNotificationHandler(handler)); 21 }; 22 } 23} 1package com.demo.project81.service; 2 3import java.time.LocalDateTime; 4import java.util.function.Consumer; 5 6import com.demo.project81.domain.Task; 7import lombok.RequiredArgsConstructor; 8import lombok.extern.slf4j.Slf4j; 9import org.postgresql.PGNotification; 10import org.springframework.stereotype.Component; 11 12@Component 13@Slf4j 14@RequiredArgsConstructor 15public class NotificationHandler implements Consumer\u0026lt;PGNotification\u0026gt; { 16 17 final TaskService taskService; 18 19 @Override 20 public void accept(PGNotification t) { 21 log.info(\u0026#34;Notification received: pid={}, name={}, param={}\u0026#34;, t.getPID(), t.getName(), t.getParameter()); 22 Task task = taskService.findByIdWithLock(Long.valueOf(t.getParameter())); 23 task.setProcessedAt(LocalDateTime.now()); 24 
task.setProcessedBy(taskService.getHostName() + \u0026#34;_\u0026#34; + Thread.currentThread().getName()); 25 taskService.save(task); 26 log.info(\u0026#34;Processed Task: {}\u0026#34;, task); 27 } 28 29} 1package com.demo.project81.service; 2 3import java.sql.Connection; 4import java.util.function.Consumer; 5 6import com.demo.project81.domain.Task; 7import lombok.RequiredArgsConstructor; 8import lombok.extern.slf4j.Slf4j; 9import org.postgresql.PGConnection; 10import org.postgresql.PGNotification; 11import org.springframework.jdbc.core.JdbcTemplate; 12import org.springframework.stereotype.Service; 13import org.springframework.transaction.annotation.Transactional; 14 15@Service 16@RequiredArgsConstructor 17@Slf4j 18public class NotifierService { 19 20 static final String TASK_CHANNEL = \u0026#34;tasks\u0026#34;; 21 final JdbcTemplate jdbcTemplate; 22 23 @Transactional 24 public void notifyTaskCreated(Task task) { 25 log.info(\u0026#34;Notifying task channel!\u0026#34;); 26 jdbcTemplate.execute(\u0026#34;NOTIFY \u0026#34; + TASK_CHANNEL + \u0026#34;, \u0026#39;\u0026#34; + task.getId() + \u0026#34;\u0026#39;\u0026#34;); 27 } 28 29 public Runnable createNotificationHandler(Consumer\u0026lt;PGNotification\u0026gt; consumer) { 30 return () -\u0026gt; { 31 jdbcTemplate.execute((Connection connection) -\u0026gt; { 32 log.info(\u0026#34;notificationHandler: sending LISTEN command...\u0026#34;); 33 connection.createStatement().execute(\u0026#34;LISTEN \u0026#34; + TASK_CHANNEL); 34 35 PGConnection pgConnection = connection.unwrap(PGConnection.class); 36 37 while (!Thread.currentThread().isInterrupted()) { 38 PGNotification[] notifications = pgConnection.getNotifications(10000); 39 if (notifications == null || notifications.length == 0) { 40 continue; 41 } 42 for (PGNotification nt : notifications) { 43 consumer.accept(nt); 44 } 45 } 46 return 0; 47 }); 48 49 }; 50 } 51} 1package com.demo.project81.service; 2 3import java.net.InetAddress; 4import java.time.LocalDateTime; 5 
6import com.demo.project81.domain.Task; 7import com.demo.project81.repository.TaskRepository; 8import lombok.RequiredArgsConstructor; 9import lombok.SneakyThrows; 10import org.springframework.stereotype.Service; 11import org.springframework.transaction.annotation.Transactional; 12 13@Service 14@RequiredArgsConstructor 15public class TaskService { 16 17 final TaskRepository taskRepository; 18 final NotifierService notifier; 19 20 @Transactional(readOnly = true) 21 public Task findById(Long id) { 22 return taskRepository.findById(id).orElseThrow(); 23 } 24 25 @Transactional 26 public Task findByIdWithLock(Long id) { 27 return taskRepository.findByIdWithLock(id); 28 } 29 30 @Transactional 31 public Task queueTask(Task task) { 32 task.setCreatedAt(LocalDateTime.now()); 33 task.setCreatedBy(getHostName() + \u0026#34;_\u0026#34; + Thread.currentThread().getName()); 34 task = taskRepository.save(task); 35 notifier.notifyTaskCreated(task); 36 return task; 37 } 38 39 @SneakyThrows 40 public String getHostName() { 41 return InetAddress.getLocalHost().getHostName(); 42 } 43 44 @Transactional 45 public Task save(Task task) { 46 return taskRepository.save(task); 47 } 48} Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project 81 2 3Message Queue - Postgres 4 5[https://gitorko.github.io/post/message-queue-postgres](https://gitorko.github.io/post/message-queue-postgres) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 21.0.3 2024-04-16 LTS 14``` 15 16### Postgres DB 17 18```bash 19docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:14 20docker ps 21docker exec -it pg-container psql -U postgres -W postgres 22CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 23CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 24grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 25 26docker stop pg-container 27docker 
start pg-container 28``` 29 30### Dev 31 32To run the backend in dev mode. 33 34```bash 35./gradlew clean build 36./gradlew bootRun 37``` 38 References https://www.postgresql.org/docs/current/sql-listen.html\nhttps://www.postgresql.org/docs/current/sql-notify.html\nhttps://www.postgresql.org/docs/current/sql-lock.html\n","link":"https://gitorko.github.io/post/message-queue-postgres/","section":"post","tags":["jdbc","task","queue","liquibase"],"title":"Message Queue - Postgres"},{"body":"","link":"https://gitorko.github.io/categories/postgresql/","section":"categories","tags":null,"title":"PostgreSQL"},{"body":"","link":"https://gitorko.github.io/tags/queue/","section":"tags","tags":null,"title":"Queue"},{"body":"","link":"https://gitorko.github.io/categories/queue/","section":"categories","tags":null,"title":"Queue"},{"body":"","link":"https://gitorko.github.io/tags/task/","section":"tags","tags":null,"title":"Task"},{"body":"","link":"https://gitorko.github.io/tags/algorithms/","section":"tags","tags":null,"title":"Algorithms"},{"body":"","link":"https://gitorko.github.io/categories/algorithms/","section":"categories","tags":null,"title":"Algorithms"},{"body":"","link":"https://gitorko.github.io/tags/coding/","section":"tags","tags":null,"title":"Coding"},{"body":"","link":"https://gitorko.github.io/tags/data-structure/","section":"tags","tags":null,"title":"Data-Structure"},{"body":"","link":"https://gitorko.github.io/categories/ds/","section":"categories","tags":null,"title":"DS"},{"body":"A comprehensive guide for java coding interviews covering areas like algorithms, datastructures, sorting, leetcode problems, concurrency, java fundamentals.\nGithub: https://github.com/gitorko/project01\nPreparation We will first understand the fundamentals of data structure, like how heap, queue, stacks work and how to implement them from scratch. Then we will look at the various sorting algorithms. 
Then we will move to leetcode problems covering easy, medium \u0026amp; hard problems. Then we will move to concurrency problems, how atomic variable work, how locks work etc. Then we will cover some SQL and database queries. The problems solved here are concise \u0026amp; small making it easy to understand and revise. All problems solved here are developed with test driven approach, with various test that can be run locally. Each of the solutions follow certain pattern. Eg: if you learn back-tracking solution in one problem the pattern is similar when you solve it in other problem. This is very important when it comes to the learning aspect. Most problems have a parent problem, Once you solve this parent problem you can solve various subset or variation of this problem. Such problems are grouped as similar in the solutions. Approach To Solve Given a problem here are some questions that should help you figure out the general direction of how to solve it\nWhich data structure can I use? Arrays, LinkedList, HashMap, Heap, Tree, or Trie Do I need to use 2 data structures? eg: LRU How do I break the problem into smaller units, is there a problem within a problem, can i write a decision tree? Does this problem look similar to other problems you have solved? Will sorting make the problem easier to solve? Can I use any algorithmic techniques, like bfs, dfs, two pointer etc. Do any design patterns apply that could make it easier to maintain, like observer pattern? What is the time \u0026amp; space complexity? Best case, worst case time complexity? Average case is usually difficult to derive. Tips\nLinked list problems often use a dummy node. To delete a node in the link list you need previous node or be position one node behind. Time complexity is O(number of branches ^ n) In-order traversal of BST results in ascending order array. BFS requires a queue, DFS requires recursion. If the char set is bound to 26 chars, use char[26] array instead of Map or Set. 
Algo Techniques Sorting Map \u0026amp; Set Recursion Fast pointer \u0026amp; Slow pointer Min-Heap vs Max-Heap (Priority Queue) Binary search BFS vs DFS Two Pointers Sliding Window Fast pointer vs Slow pointer Backtracking Matrix Prefix sum Divide \u0026amp; Conquer Memoization / Dynamic programming Greedy Topological Sort Intervals Cyclic Sort Bitwise XOR / Bit manipulation Trie Stacks \u0026amp; Queue Big-O 1log(n) \u0026lt; √(n) \u0026lt; n \u0026lt; nlog(n) \u0026lt; n^2 \u0026lt; n^3 \u0026lt; 2^n \u0026lt; n! Algorithms and Data Structures Cheatsheet\nCoding Sorting Id Leetcode Solution Type 1 Bubble Sort Solution EASY 2 Selection sort Solution EASY 3 Insertion sort Solution EASY 4 MergeSort Solution MEDIUM 5 912. Sort an Array Solution MEDIUM 6 QuickSort Solution MEDIUM 7 912. Sort an Array Solution MEDIUM 8 Shell sort Solution HARD 9 Counting sort Solution EASY 10 Radix sort Solution MEDIUM 11 Bucket sort Solution EASY 12 Heap sort Solution MEDIUM 13 704. Binary Search Solution EASY 14 Employee Search Solution EASY Data Structure Id Leetcode Solution Type 1 Implement Circular Array Solution EASY 2 622. Design Circular Queue Solution MEDIUM 3 Implement ArrayList with array Solution EASY 4 Insert to BST, Delete from BST, Find from BST Solution EASY 5 Implement Doubly Linked List Solution EASY 6 707. Design Linked List Solution MEDIUM 7 706. Design HashMap Solution EASY 8 Implement Map Solution MEDIUM 9 Implement Max Heap Solution EASY 10 Implement Min Heap Solution EASY 11 Implement Queue Solution EASY 12 Implement Stack Solution EASY 13 208. Implement Trie, Prefix Tree Solution EASY 14 225. Implement Stack using Queues Solution EASY LeetCode - Easy Id Leetcode Solution Type 1 67. Add Binary Solution EASY - Number 2 989. Add to Array-Form of Integer Solution EASY - Number 3 455. Assign Cookies Solution EASY - Number 4 682. Baseball Game Solution EASY - Number 5 121. Best Time to Buy and Sell Stock Solution EASY - Number 6 605. 
Can Place Flowers Solution EASY - Number 7 724. Find Pivot Index Solution EASY - Number 8 9. Palindrome Number Solution EASY - Number 9 860. Lemonade Change Solution EASY - Number 10 1572. Matrix Diagonal Sum Solution EASY - Number 11 485. Max Consecutive Ones Solution EASY - Number 12 53. Maximum Subarray Solution EASY - Number 13 88. Merge Sorted Array Solution EASY - Number 14 1984. Minimum Difference Between Highest and Lowest of K Scores Solution EASY - Number 15 268. Missing Number Solution EASY - Number 16 283. Move Zeroes Solution EASY - Number 17 1523. Count Odd Numbers in an Interval Range Solution EASY - Number 18 119. Pascal's Triangle II Solution EASY - Number 19 118. Pascal's Triangle Solution EASY - Number 20 66. Plus One Solution EASY - Number 21 1299. Replace Elements with Greatest Element on Right Side Solution EASY - Number 22 7. Reverse Integer Solution EASY - Number 23 1470. Shuffle the Array Solution EASY - Number 24 136. Single Number Solution EASY - Number 25 228. Summary Ranges Solution EASY - Number 26 263. Ugly Number Solution EASY - Number 27 242. Valid Anagram Solution EASY - String 28 389. Find the Difference Solution EASY - String 29 412. Fizz Buzz Solution EASY - String 30 1071. Greatest Common Divisor of Strings Solution EASY - String 31 205. Isomorphic Strings Solution EASY - String 32 392. Is Subsequence Solution EASY - String 33 58. Length of Last Word Solution EASY - String 34 14. Longest Common Prefix Solution EASY - String 35 1189. Maximum Number of Balloons Solution EASY - String 36 Minimum append to make string palindrome Solution EASY - String 37 1047. Remove All Adjacent Duplicates In String Solution EASY - String 38 344. Reverse String Solution EASY - String 39 13. Roman to Integer Solution EASY - String 40 28. Implement strStr Solution MEDIUM - String 41 929. Unique Email Addresses Solution EASY - String 42 953. Verifying an Alien Dictionary Solution EASY - String 43 219. 
Contains Duplicate II Solution EASY - Map \u0026amp; Set 44 217. Contains Duplicate Solution EASY - Map \u0026amp; Set 45 1603. Design Parking System Solution EASY - Map \u0026amp; Set 46 202. Happy Number Solution EASY - Map \u0026amp; Set 47 705. Design HashSet Solution EASY - Map \u0026amp; Set 48 169. Majority Element Solution EASY - Map \u0026amp; Set 49 1. Two Sum Solution EASY - Map \u0026amp; Set 50 387. First Unique Character in a String Solution EASY - Map \u0026amp; Set 51 290. Word Pattern Solution EASY - Map \u0026amp; Set 52 1636. Sort Array by Increasing Frequency Solution EASY - Heap 53 703. Kth Largest Element in a Stream Solution EASY - Heap 54 1046. Last Stone Weight Solution EASY - Heap 55 1005. Maximize Sum Of Array After K Negations Solution EASY - Heap 56 349. Intersection of Two Arrays Solution EASY - Sliding window / Two pointer 57 696. Count Binary Substrings Solution EASY - Sliding window / Two pointer 58 Factorial Solution EASY - Sliding window / Two pointer 59 509. Fibonacci Number Solution EASY - Sliding window / Two pointer 60 125. Valid Palindrome Solution EASY - Sliding window / Two pointer 61 674. Longest Continuous Increasing Subsequence Solution EASY - Sliding window / Two pointer 62 26. Remove Duplicates from Sorted Array Solution EASY - Sliding window / Two pointer 63 27. Remove Element Solution EASY - Sliding window / Two pointer 64 977. Squares of a Sorted Array Solution EASY - Sliding window / Two pointer 65 680. Valid Palindrome II Solution EASY - Sliding window / Two pointer 66 463. Island Perimeter Solution EASY - Matrix 67 Ones in Range Solution EASY - Pre-Sum 68 303. Range Sum Query - Immutable Solution EASY - Pre-Sum 69 70. Climbing Stairs Solution EASY - DP 70 338. Counting Bits Solution EASY - DP 71 746. Min Cost Climbing Stairs Solution EASY - DP 72 1137. N-th Tribonacci Number Solution EASY - DP 73 203. Remove Linked List Elements Solution EASY - Link List 74 141. Linked List Cycle Solution EASY - Link List 75 142. 
Linked List Cycle II Solution EASY - Link List 76 160. Intersection of Two Linked Lists Solution EASY - Link List 77 21. Merge Two Sorted Lists Solution EASY - Link List 78 876. Middle of the Linked List Solution EASY - Link List 79 234. Palindrome Linked List Solution EASY - Link List 80 83. Remove Duplicates from Sorted List Solution EASY - Link List 81 206. Reverse Linked List Solution EASY - Link List 82 235. Lowest Common Ancestor of a Binary Search Tree Solution EASY - Binary Tree 83 938. Range Sum of BST Solution EASY - Binary Tree 84 Binary Tree two sum different level Solution EASY - Binary Tree 85 Create Binary Tree from Level order Solution EASY - Binary Tree 86 Get height of binary tree Solution EASY - Binary Tree 87 104. Maximum Depth of Binary Tree Solution EASY - Binary Tree 88 111. Minimum Depth of Binary Tree Solution EASY - Binary Tree 89 Get size of binary tree Solution EASY - Binary Tree 90 543. Diameter of Binary Tree Solution EASY - Binary Tree 91 226. Invert Binary Tree Solution EASY - Binary Tree 92 617. Merge Two Binary Trees Solution EASY - Binary Tree 93 257. Binary Tree Paths Solution EASY - Binary Tree 94 112. Path Sum Solution EASY - Binary Tree 95 100. Same Tree Solution EASY - Binary Tree 96 671. Second Minimum Node In a Binary Tree Solution EASY - Binary Tree 97 572. Subtree of Another Tree Solution EASY - Binary Tree 98 404. Sum of Left Leaves Solution EASY - Binary Tree 99 144. Binary Tree Preorder Traversal Solution EASY - Binary Tree 100 145. Binary Tree Postorder Traversal Solution EASY - Binary Tree 101 94. Binary Tree Inorder Traversal Solution EASY - Binary Tree 102 102. Binary Tree Level Order Traversal Solution EASY - Binary Tree 103 144. Binary Tree Preorder Traversal Solution EASY - Binary Tree 104 145. Binary Tree Postorder Traversal Solution EASY - Binary Tree 105 94. Binary Tree Inorder Traversal Solution EASY - Binary Tree 106 107. Binary Tree Level Order Traversal II Solution EASY - Binary Tree 107 783. 
Minimum Distance Between BST Nodes Solution EASY - Binary Tree 108 606. Construct String from Binary Tree Solution EASY - Binary Tree 109 101. Symmetric Tree Solution EASY - Binary Tree 110 441. Arranging Coins Solution EASY - Binary Search 111 108. Convert Sorted Array to Binary Search Tree Solution EASY - Binary Search 112 374. Guess Number Higher or Lower Solution EASY - Binary Search 113 1539. Kth Missing Positive Number Solution EASY - Binary Search 114 35. Search Insert Position Solution EASY - Binary Search 115 69. Sqrtx Solution EASY - Binary Search 116 367. Valid Perfect Square Solution EASY - Binary Search 117 844. Backspace String Compare Solution EASY - Stack \u0026amp; Monotonic Stack 118 155. Min Stack Solution EASY - Stack \u0026amp; Monotonic Stack 119 Nearest Greater to Right Solution EASY - Stack \u0026amp; Monotonic Stack 120 496. Next Greater Element I Solution EASY - Stack \u0026amp; Monotonic Stack 121 Nearest Greater to Right Solution EASY - Stack \u0026amp; Monotonic Stack 122 20. Valid Parentheses Solution EASY - Stack \u0026amp; Monotonic Stack 123 71. Simplify Path Solution EASY - Stack \u0026amp; Monotonic Stack 124 Sort a stack Solution EASY - Stack \u0026amp; Monotonic Stack 125 733. Flood Fill Solution EASY - Graph 126 1260. Shift 2D Grid Solution EASY - Graph 127 1114. Print in Order Solution EASY - Thread 128 448. Find All Numbers Disappeared in an Array Solution EASY - Cyclic sort 129 191. Number of 1 Bits Solution EASY - Bit Manipulation 130 190. Reverse Bits Solution EASY - Bit Manipulation 131 339. Nested List Weight Sum Solution EASY - Generic LeetCode - Medium Id Leetcode Solution Type 1 665. Non-decreasing Array Solution MEDIUM - Number 2 1968. Array With Elements Not Equal to Average of Neighbors Solution MEDIUM - Number 3 853. Car Fleet Solution MEDIUM - Number 4 1958. Check if Move is Legal Solution MEDIUM - Number 5 38. Count and Say Solution MEDIUM - Number 6 2466. 
Count Ways To Build Good Strings Solution MEDIUM - Number 7 1921. Eliminate Maximum Number of Monsters Solution MEDIUM - Number 8 Pair with diff Solution MEDIUM - Number 9 532. K-diff Pairs in an Array Solution MEDIUM - Number 10 18. 4Sum Solution MEDIUM - Number 11 12. Integer to Roman Solution MEDIUM - Number 12 841. Keys and Rooms Solution MEDIUM - Number 13 670. Maximum Swap Solution MEDIUM - Number 14 152. Maximum Product Subarray Solution MEDIUM - Number 15 918. Maximum Sum Circular Subarray Solution MEDIUM - Number 16 1899. Merge Triplets to Form Target Triplet Solution MEDIUM - Number 17 1007. Minimum Domino Rotations For Equal Row Solution MEDIUM - Number 18 2439. Minimize Maximum of Array Solution MEDIUM - Number 19 Smallest Positive Integer Solution MEDIUM - Number 20 2028. Find Missing Observations Solution MEDIUM - Number 21 31. Next Permutation Solution MEDIUM - Number 22 2001. Number of Pairs of Interchangeable Rectangles Solution MEDIUM - Number 23 50. Pow x, n Solution MEDIUM - Number 24 238. Product of Array Except Self Solution MEDIUM - Number 25 80. Remove Duplicates from Sorted Array II Solution MEDIUM - Number 26 402. Remove K Digits Solution MEDIUM - Number 27 1041. Robot Bounded In Circle Solution MEDIUM - Number 28 Shuffle Array Solution MEDIUM - Number 29 75. Sort Colors Solution MEDIUM - Number 30 280. Wiggle Sort Solution MEDIUM - Number 31 2348. Number of Zero-Filled Subarrays Solution MEDIUM - Number 32 Caesar Cipher Solution MEDIUM - String 33 165. Compare Version Numbers Solution MEDIUM - String 34 271. Encode and Decode Strings Solution MEDIUM - String 35 395. Longest Substring with At Least K Repeating Characters Solution MEDIUM - String 36 1963. Minimum Number of Swaps to Make the String Balanced Solution MEDIUM - String 37 43. Multiply Strings Solution MEDIUM - String 38 752. Open the Lock Solution MEDIUM - String 39 647. Palindromic Substrings Solution MEDIUM - String 40 838. Push Dominoes Solution MEDIUM - String 41 1209. 
Remove All Adjacent Duplicates in String II Solution MEDIUM - String 42 6. Zigzag Conversion Solution MEDIUM - String 43 1461. Check If a String Contains All Binary Codes of Size K Solution MEDIUM - Map \u0026amp; Set 44 554. Brick Wall Solution MEDIUM - Map \u0026amp; Set 45 2013. Detect Squares Solution MEDIUM - Map \u0026amp; Set 46 1296. Divide Array in Sets of K Consecutive Numbers Solution MEDIUM - Map \u0026amp; Set 47 535. Encode and Decode TinyURL Solution MEDIUM - Map \u0026amp; Set 48 973. K Closest Points to Origin Solution MEDIUM - Map \u0026amp; Set 49 49. Group Anagrams Solution MEDIUM - Map \u0026amp; Set 50 846. Hand of Straights Solution MEDIUM - Map \u0026amp; Set 51 380. Insert Delete GetRandom Solution MEDIUM - Map \u0026amp; Set 52 1930. Unique Length 3 Palindromic Subsequences Solution MEDIUM - Map \u0026amp; Set 53 128. Longest Consecutive Sequence Solution MEDIUM - Map \u0026amp; Set 54 146. LRU Cache Solution MEDIUM - Map \u0026amp; Set 55 146. LRU Cache Solution MEDIUM - Map \u0026amp; Set 56 187. Repeated DNA Sequences Solution MEDIUM - Map \u0026amp; Set 57 621. Task Scheduler Solution MEDIUM - Map \u0026amp; Set 58 16. 3Sum Closest Solution MEDIUM - Map \u0026amp; Set 59 1396. Design Underground System Solution MEDIUM - Map \u0026amp; Set 60 36. Valid Sudoku Solution MEDIUM - Map \u0026amp; Set 61 215. Kth Largest Element in an Array Solution MEDIUM - Heap 62 1985. Find the Kth Largest Integer in the Array Solution MEDIUM - Heap 63 1094. Car Pooling Solution MEDIUM - Heap 64 1167. Minimum Cost to Connect Sticks Solution MEDIUM - Heap 65 1834. Single-Threaded CPU Solution MEDIUM - Heap 66 355. Design Twitter Solution MEDIUM - Heap 67 451. Sort Characters By Frequency Solution MEDIUM - Heap 68 1405. Longest Happy String Solution MEDIUM - Heap 69 983. Minimum Cost For Tickets Solution MEDIUM - Heap 70 1882. Process Tasks Using Servers Solution MEDIUM - Heap 71 767. Reorganize String Solution MEDIUM - Heap 72 358. 
Rearrange String k Distance Apart Solution MEDIUM - Heap 73 1845. Seat Reservation Manager Solution MEDIUM - Heap 74 Sort K sorted array Solution MEDIUM - Heap 75 347. Top K Frequent Elements Solution MEDIUM - Heap 76 692. Top K Frequent Words Solution MEDIUM - Heap 77 438. Find All Anagrams in a String Solution MEDIUM - Sliding window / Two pointer 78 122. Best Time to Buy and Sell Stock II Solution MEDIUM - Sliding window / Two pointer 79 881. Boats to Save People Solution MEDIUM - Sliding window / Two pointer 80 Boats to Save People without count per boat Solution MEDIUM - Sliding window / Two pointer 81 11. Container With Most Water Solution MEDIUM - Sliding window / Two pointer 82 1838. Frequency of the Most Frequent Element Solution MEDIUM - Sliding window / Two pointer 83 904. Fruit Into Baskets Solution MEDIUM - Sliding window / Two pointer 84 5. Longest Palindromic Substring Solution MEDIUM - Sliding window / Two pointer 85 424. Longest Repeating Character Replacement Solution MEDIUM - Sliding window / Two pointer 86 159. Longest Substring with At Most Two Distinct Characters Solution MEDIUM - Sliding window / Two pointer 87 340. Longest Substring with At Most K Distinct Characters Solution MEDIUM - Sliding window / Two pointer 88 3. Longest Substring Without Repeating Characters Solution MEDIUM - Sliding window / Two pointer 89 1004. Max Consecutive Ones III Solution MEDIUM - Sliding window / Two pointer 90 1423. Maximum Points You Can Obtain from Cards Solution MEDIUM - Sliding window / Two pointer 91 1888. Minimum Number of Flips to Make the Binary String Alternating Solution MEDIUM - Sliding window / Two pointer 92 64. Minimum Path Sum Solution MEDIUM - Sliding window / Two pointer 93 209. Minimum Size Subarray Sum Solution MEDIUM - Sliding window / Two pointer 94 763. Partition Labels Solution MEDIUM - Sliding window / Two pointer 95 567. Permutation in String Solution MEDIUM - Sliding window / Two pointer 96 151. 
Reverse Words in a String Solution MEDIUM - Sliding window / Two pointer 97 581. Shortest Unsorted Continuous Subarray Solution MEDIUM - Sliding window / Two pointer 98 1343. Number of Sub-arrays of Size K and Average Greater than or Equal to Threshold Solution MEDIUM - Sliding window / Two pointer 99 1498. Number of Subsequences That Satisfy the Given Sum Condition Solution MEDIUM - Sliding window / Two pointer 100 15. 3Sum Solution MEDIUM - Sliding window / Two pointer 101 167. Two Sum II - Input Array Is Sorted Solution MEDIUM - Sliding window / Two pointer 102 1254. Number of Closed Islands Solution MEDIUM - Matrix 103 1905. Count Sub Islands Solution MEDIUM - Matrix 104 289. Game of Life Solution MEDIUM - Matrix 105 1428. Leftmost Column with at Least a One Solution MEDIUM - Matrix 106 695. Max Area of Island Solution MEDIUM - Matrix 107 221. Maximal Square Solution MEDIUM - Matrix 108 200. Number of Islands Solution MEDIUM - Matrix 109 417. Pacific Atlantic Water Flow Solution MEDIUM - Matrix 110 48. Rotate Image Solution MEDIUM - Matrix 111 73. Set Matrix Zeroes Solution MEDIUM - Matrix 112 1091. Shortest Path in Binary Matrix Solution MEDIUM - Matrix 113 54. Spiral Matrix Solution MEDIUM - Matrix 114 59. Spiral Matrix II Solution MEDIUM - Matrix 115 130. Surrounded Regions Solution MEDIUM - Matrix 116 286. Walls and Gates Solution MEDIUM - Matrix 117 79. Word Search Solution MEDIUM - Matrix 118 113. Path Sum II Solution MEDIUM - Backtracking 119 77. Combinations Solution MEDIUM - Backtracking 120 39. Combination Sum Solution MEDIUM - Backtracking 121 40. Combination Sum II Solution MEDIUM - Backtracking 122 2101. Detonate the Maximum Bombs Solution MEDIUM - Backtracking 123 1239. Maximum Length of a Concatenated String with Unique Characters Solution MEDIUM - Backtracking 124 131. Palindrome Partitioning Solution MEDIUM - Backtracking 125 22. Generate Parentheses Solution MEDIUM - Backtracking 126 46. Permutations Solution MEDIUM - Backtracking 127 47. 
Permutations II Solution MEDIUM - Backtracking 128 String permutation Solution MEDIUM - Backtracking 129 17. Letter Combinations of a Phone Number Solution MEDIUM - Backtracking 130 93. Restore IP Addresses Solution MEDIUM - Backtracking 131 1849. Splitting a String Into Descending Consecutive Values Solution MEDIUM - Backtracking 132 90. Subsets II Solution MEDIUM - Backtracking 133 78. Subsets Solution MEDIUM - Backtracking 134 1376. Time Needed to Inform All Employees Solution MEDIUM - Backtracking 135 1980. Find Unique Binary String Solution MEDIUM - Backtracking 136 523. Continuous Subarray Sum Solution MEDIUM - Pre-Sum 137 926. Flip String to Monotone Increasing Solution MEDIUM - Pre-Sum 138 2017. Grid Game Solution MEDIUM - Pre-Sum 139 528. Random Pick with Weight Solution MEDIUM - Pre-Sum 140 304. Range Sum Query 2D - Immutable Solution MEDIUM - Pre-Sum 141 560. Subarray Sum Equals K Solution MEDIUM - Pre-Sum 142 325. Maximum Size Subarray Sum Equals k Solution MEDIUM - Pre-Sum 143 0/1 knapsack Solution MEDIUM - DP 144 894. All Possible Full Binary Trees Solution MEDIUM - DP 145 1626. Best Team With No Conflicts Solution MEDIUM - DP 146 309. Best Time to Buy and Sell Stock with Cooldown Solution MEDIUM - DP 147 518. Coin Change 2 Solution MEDIUM - DP 148 322. Coin Change Solution MEDIUM - DP 149 377. Combination Sum IV Solution MEDIUM - DP 150 91. Decode Ways Solution MEDIUM - DP 151 740. Delete and Earn Solution MEDIUM - DP 152 Egg Drop Solution MEDIUM - DP 153 1884. Egg Drop With 2 Eggs and N Floors Solution MEDIUM - DP 154 198. House Robber Solution MEDIUM - DP 155 213. House Robber II Solution MEDIUM - DP 156 337. House Robber III Solution MEDIUM - DP 157 343. Integer Break Solution MEDIUM - DP 158 97. Interleaving String Solution MEDIUM - DP 159 1143. Longest Common Subsequence Solution MEDIUM - DP 160 300. Longest Increasing Subsequence Solution MEDIUM - DP 161 516. Longest Palindromic Subsequence Solution MEDIUM - DP 162 473. 
Matchsticks to Square Solution MEDIUM - DP 163 1911. Maximum Alternating Subsequence Sum Solution MEDIUM - DP 164 2002. Maximum Product of the Length of Two Palindromic Subsequences Solution MEDIUM - DP 165 673. Number of Longest Increasing Subsequence Solution MEDIUM - DP 166 256. Paint House Solution MEDIUM - DP 167 416. Partition Equal Subset Sum Solution MEDIUM - DP 168 698. Partition to K Equal Sum Subsets Solution MEDIUM - DP 169 279. Perfect Squares Solution MEDIUM - DP 170 Rod Cutting Solution MEDIUM - DP 171 2140. Solving Questions With Brainpower Solution MEDIUM - DP 172 1140. Stone Game II Solution MEDIUM - DP 173 877. Stone Game Solution MEDIUM - DP 174 SubSet Sum Solution MEDIUM - DP 175 SubSet Sum Count Solution MEDIUM - DP 176 494. Target Sum Solution MEDIUM - DP 177 120. Triangle Solution MEDIUM - DP 178 Unbounded knapsack Solution MEDIUM - DP 179 1035. Uncrossed Lines Solution MEDIUM - DP 180 95. Unique Binary Search Trees II Solution MEDIUM - DP 181 96. Unique Binary Search Trees Solution MEDIUM - DP 182 63. Unique Paths II Solution MEDIUM - DP 183 62. Unique Paths Solution MEDIUM - DP 184 139. Word Break Solution MEDIUM - DP 185 2. Add Two Numbers Solution MEDIUM - Link List 186 138. Copy List with Random Pointer Solution MEDIUM - Link List 187 287. Find the Duplicate Number Solution MEDIUM - Link List 188 147. Insertion Sort List Solution MEDIUM - Link List 189 109. Convert Sorted List to Binary Search Tree Solution MEDIUM - Link List 190 86. Partition List Solution MEDIUM - Link List 191 82. Remove Duplicates from Sorted List II Solution MEDIUM - Link List 192 19. Remove Nth Node From End of List Solution MEDIUM - Link List 193 143. Reorder List Solution MEDIUM - Link List 194 92. Reverse Linked List II Solution MEDIUM - Link List 195 Reverse link list even odd Solution MEDIUM - Link List 196 189. Rotate Array Solution MEDIUM - Link List 197 61. Rotate List Solution MEDIUM - Link List 198 148. Sort List Solution MEDIUM - Link List 199 24. 
Swap Nodes in Pairs Solution MEDIUM - Link List 200 513. Find Bottom Left Tree Value Solution MEDIUM - Binary Tree 201 173. Binary Search Tree Iterator Solution MEDIUM - Binary Tree 202 230. Kth Smallest Element in a BST Solution MEDIUM - Binary Tree 203 99. Recover Binary Search Tree Solution MEDIUM - Binary Tree 204 98. Validate Binary Search Tree Solution MEDIUM - Binary Tree 205 Check Level Order Traversal of BST Solution MEDIUM - Binary Tree 206 110. Balanced Binary Tree Solution MEDIUM - Binary Tree 207 1110. Delete Nodes And Return Forest Solution MEDIUM - Binary Tree 208 979. Distribute Coins in Binary Tree Solution MEDIUM - Binary Tree 209 114. Flatten Binary Tree to Linked List Solution MEDIUM - Binary Tree 210 106. Construct Binary Tree from Inorder and Postorder Traversal Solution MEDIUM - Binary Tree 211 105. Construct Binary Tree from Preorder and Inorder Traversal Solution MEDIUM - Binary Tree 212 1448. Count Good Nodes in Binary Tree Solution MEDIUM - Binary Tree 213 236. Lowest Common Ancestor of a Binary Tree Solution MEDIUM - Binary Tree 214 236. Lowest Common Ancestor of a Binary Tree Solution MEDIUM - Binary Tree 215 437. Path Sum III Solution MEDIUM - Binary Tree 216 814. Binary Tree Pruning Solution MEDIUM - Binary Tree 217 199. Binary Tree Right Side View Solution MEDIUM - Binary Tree 218 129. Sum Root to Leaf Numbers Solution MEDIUM - Binary Tree 219 107. Binary Tree Level Order Traversal II Solution MEDIUM - Binary Tree 220 662. Maximum Width of Binary Tree Solution MEDIUM - Binary Tree 221 103. Binary Tree Zigzag Level Order Traversal Solution MEDIUM - Binary Tree 222 538. Convert BST to Greater Tree Solution MEDIUM - Binary Tree 223 450. Delete Node in a BST Solution MEDIUM - Binary Tree 224 951. Flip Equivalent Binary Trees Solution MEDIUM - Binary Tree 225 701. Insert into a Binary Search Tree Solution MEDIUM - Binary Tree 226 1993. Operations on Tree Solution MEDIUM - Binary Tree 227 116. 
Populating Next Right Pointers in Each Node Solution MEDIUM - Binary Tree 228 669. Trim a Binary Search Tree Solution MEDIUM - Binary Tree 229 252. Meeting Rooms Solution MEDIUM - Interval 230 1288. Remove Covered Intervals Solution MEDIUM - Interval 231 57. Insert Interval Solution MEDIUM - Interval 232 986. Interval List Intersections Solution MEDIUM - Interval 233 56. Merge Intervals Solution MEDIUM - Interval 234 253. Meeting Rooms II Solution MEDIUM - Interval 235 435. Non-overlapping Intervals Solution MEDIUM - Interval 236 1011. Capacity To Ship Packages Within D Days Solution MEDIUM - Binary Search 237 658. Find K Closest Elements Solution MEDIUM - Binary Search 238 34. Find First and Last Position of Element in Sorted Array Solution MEDIUM - Binary Search 239 875. Koko Eating Bananas Solution MEDIUM - Binary Search 240 1898. Maximum Number of Removable Characters Solution MEDIUM - Binary Search 241 2616. Minimize the Maximum Difference of Pairs Solution MEDIUM - Binary Search 242 153. Find Minimum in Rotated Sorted Array Solution MEDIUM - Binary Search 243 81. Search in Rotated Sorted Array II Solution MEDIUM - Binary Search 244 33. Search in Rotated Sorted Array Solution MEDIUM - Binary Search 245 74. Search a 2D Matrix Solution MEDIUM - Binary Search 246 981. Time Based Key-Value Store Solution MEDIUM - Binary Search 247 210. Course Schedule II Solution MEDIUM - Topological Sort 248 456. 132 Pattern Solution MEDIUM - Stack \u0026amp; Monotonic Stack 249 735. Asteroid Collision Solution MEDIUM - Stack \u0026amp; Monotonic Stack 250 739. Daily Temperatures Solution MEDIUM - Stack \u0026amp; Monotonic Stack 251 394. Decode String Solution MEDIUM - Stack \u0026amp; Monotonic Stack 252 1856. Maximum Subarray Min-Product Solution MEDIUM - Stack \u0026amp; Monotonic Stack 253 1249. Minimum Remove to Make Valid Parentheses Solution MEDIUM - Stack \u0026amp; Monotonic Stack 254 150. 
Evaluate Reverse Polish Notation Solution MEDIUM - Stack \u0026amp; Monotonic Stack 255 901. Online Stock Span Solution MEDIUM - Stack \u0026amp; Monotonic Stack 256 721. Accounts Merge Solution MEDIUM - Graph 257 787. Cheapest Flights Within K Stops Solution MEDIUM - Graph 258 133. Clone Graph Solution MEDIUM - Graph 259 2359. Find Closest Node to Given Two Nodes Solution MEDIUM - Graph 260 323. Number of Connected Components in an Undirected Graph Solution MEDIUM - Graph 261 207. Course Schedule Solution MEDIUM - Graph 262 802. Find Eventual Safe States Solution MEDIUM - Graph 263 261. Graph Valid Tree Solution MEDIUM - Graph 264 2477. Minimum Fuel Cost to Report to the Capital Solution MEDIUM - Graph 265 2492. Minimum Score of a Path Between Two Cities Solution MEDIUM - Graph 266 1584. Min Cost to Connect All Points Solution MEDIUM - Graph 267 1443. Minimum Time to Collect All Apples in a Tree Solution MEDIUM - Graph 268 743. Network Delay Time Solution MEDIUM - Graph 269 1514. Path with Maximum Probability Solution MEDIUM - Graph 270 684. Redundant Connection Solution MEDIUM - Graph 271 1466. Reorder Routes to Make All Paths Lead to the City Zero Solution MEDIUM - Graph 272 994. Rotting Oranges Solution MEDIUM - Graph 273 934. Shortest Bridge Solution MEDIUM - Graph 274 1129. Shortest Path with Alternating Colors Solution MEDIUM - Graph 275 909. Snakes and Ladders Solution MEDIUM - Graph 276 Bathroom Problem Solution MEDIUM - Thread 277 1115. Print FooBar Alternately Solution MEDIUM - Thread 278 1115. Print FooBar Alternately Solution MEDIUM - Thread 279 1115. Print FooBar Alternately Solution MEDIUM - Thread 280 1116. Print Zero Even Odd Solution MEDIUM - Thread 281 134. Gas Station Solution MEDIUM - Greedy 282 45. Jump Game II Solution MEDIUM - Greedy 283 1871. Jump Game VII Solution MEDIUM - Greedy 284 55. Jump Game Solution MEDIUM - Greedy 285 179. Largest Number Solution MEDIUM - Greedy 286 678. Valid Parenthesis String Solution MEDIUM - Greedy 287 1029. 
Two City Scheduling Solution MEDIUM - Greedy 288 307. Range Sum Query - Mutable Solution MEDIUM - Segment Tree 289 307. Range Sum Query - Mutable Solution MEDIUM - Segment Tree 290 211. Design Add and Search Words Data Structure Solution MEDIUM - Prefix Tree / Trie 291 442. Find All Duplicates in an Array Solution MEDIUM - Cyclic sort 292 371. Sum of Two Integers Solution MEDIUM - Bit Manipulation 293 1472. Design Browser History Solution MEDIUM - Generic 294 1472. Design Browser History Solution MEDIUM - Generic 295 1472. Design Browser History Solution MEDIUM - Generic 296 1268. Search Suggestions System Solution MEDIUM - Generic LeetCode - Hard Id Leetcode Solution Type 1 273. Integer to English Words Solution HARD - Number 2 42. Trapping Rain Water Solution HARD - Number 3 472. Concatenated Words Solution HARD - String 4 2306. Naming a Company Solution HARD - String 5 154. Find Minimum in Rotated Sorted Array II Solution HARD - String 6 295. Find Median from Data Stream Solution HARD - Map \u0026amp; Set 7 460. LFU Cache Solution HARD - Map \u0026amp; Set 8 149. Max Points on a Line Solution HARD - Map \u0026amp; Set 9 1383. Maximum Performance of a Team Solution HARD - Heap 10 1851. Minimum Interval to Include Each Query Solution HARD - Heap 11 76. Minimum Window Substring Solution HARD - Sliding window / Two pointer 12 239. Sliding Window Maximum Solution HARD - Sliding window / Two pointer 13 312. Burst Balloons Solution HARD - DP 14 1220. Count Vowels Permutation Solution HARD - DP 15 72. Edit Distance Solution HARD - DP 16 32. Longest Valid Parentheses Solution HARD - DP 17 1547. Minimum Cost to Cut a Stick Solution HARD - DP 18 1553. Minimum Number of Days to Eat N Oranges Solution HARD - DP 19 52. N-Queens II Solution HARD - DP 20 51. N-Queens Solution HARD - DP 21 920. Number of Music Playlists Solution HARD - DP 22 132. Palindrome Partitioning II Solution HARD - DP 23 1866. 
Number of Ways to Rearrange Sticks With K Sticks Visible Solution HARD - DP 24 10. Regular Expression Matching Solution HARD - DP 25 691. Stickers to Spell Word Solution HARD - DP 26 1406. Stone Game III Solution HARD - DP 27 140. Word Break II Solution HARD - DP 28 23. Merge k Sorted Lists Solution HARD - Link List 29 25. Reverse Nodes in k-Group Solution HARD - Link List 30 297. Serialize and Deserialize Binary Tree Solution HARD - Binary Tree 31 124. Binary Tree Maximum Path Sum Solution HARD - Binary Tree 32 352. Data Stream as Disjoint Intervals Solution HARD - Interval 33 4. Median of Two Sorted Arrays Solution HARD - Binary Search 34 410. Split Array Largest Sum Solution HARD - Binary Search 35 269. Alien Dictionary Solution HARD - Topological Sort 36 115. Distinct Subsequences Solution HARD - Stack \u0026amp; Monotonic Stack 37 84. Largest Rectangle in Histogram Solution HARD - Stack \u0026amp; Monotonic Stack 38 329. Longest Increasing Path in a Matrix Solution HARD - Stack \u0026amp; Monotonic Stack 39 1964. Find the Longest Valid Obstacle Course at Each Position Solution HARD - Stack \u0026amp; Monotonic Stack 40 895. Maximum Frequency Stack Solution HARD - Stack \u0026amp; Monotonic Stack 41 85. Maximal Rectangle Solution HARD - Stack \u0026amp; Monotonic Stack 42 1857. Largest Color Value in a Directed Graph Solution HARD - Graph 43 2421. Number of Good Paths Solution HARD - Graph 44 332. Reconstruct Itinerary Solution HARD - Graph 45 778. Swim in Rising Water Solution HARD - Graph 46 127. Word Ladder Solution HARD - Graph 47 212. Word Search II Solution HARD - Prefix Tree / Trie 48 41. 
First Missing Positive Solution HARD - Cyclic sort Concurrency Id Leetcode Solution Type 1 Long Adder \u0026amp; Long Accumulator Solution MEDIUM 2 Callable Solution EASY 3 Create Dead Lock Solution MEDIUM 4 Producer Consumer Solution EASY 5 Producer Consumer Solution EASY 6 Producer Consumer Solution EASY 7 Semaphore Solution MEDIUM 8 ScheduledExecutorService Solution MEDIUM 9 Stop Thread Solution EASY 10 Stop Thread Solution EASY 11 Print Even Odd Solution EASY 12 Thread Starvation Solution EASY 13 Thread Abort Policy Solution EASY 14 Read Write lock Solution MEDIUM 15 ReentrantLock Solution MEDIUM 16 Stamped Lock Solution MEDIUM 17 Fork Join Solution MEDIUM 18 Common Words Solution EASY 19 Common Words multi thread Solution MEDIUM 20 Concurrent Modification Solution EASY 21 Cyclic Barrier Solution MEDIUM 22 Count Down Latch Solution EASY 23 Increment Array Solution MEDIUM 24 Phaser Solution MEDIUM 25 Completable Future Solution EASY 26 AtomicStampedReference Solution MEDIUM 27 AtomicReference Solution MEDIUM 28 Implement Custom Thread Pool Solution MEDIUM 29 Implement Custom Semaphore Solution MEDIUM 30 Parallel Stream Solution MEDIUM Java Concurrency LinkedBlockingQueue (unbounded) SynchronousQueue - space for only 1 ArrayBlockingQueue (bounded) DelayQueue - unbounded blocking queue of delayed elements, element can only be taken when its delay has expired. Thread Pool Size cpu intensive = num of cores io intensive = time it takes for IO to complete. ideal thread pool size = cores * (1 + (wait time/cpu time)) https://youtu.be/ErNre5varF8\nTypes of ExecutorService ExecutorService uses BlockingQueue by default newFixedThreadPool - LinkedBlockingQueue newSingleThreadExecutor newCachedThreadPool - SynchronousQueue, dynamically scale the threads to handle the amount of tasks, threads are idle for 60 seconds, they will be scaled down Rejection policy AbortPolicy - This is the default policy. It causes the executor to throw a RejectedExecutionException. 
CallerRunsPolicy - the producer thread will be employed to run the task it just submitted. This is effective back pressure. DiscardOldestPolicy - accept the task and throw away the oldest task in the BlockingQueue DiscardPolicy - accept the task but silently throw it away Custom Policy - We can implement the RejectedExecutionHandler interface and provide our own logic to handle Mutex vs Semaphore Mutex (or Mutual Exclusion Semaphores) is a locking mechanism used to synchronize access to a resource. Only one task can acquire the mutex. It means there will be ownership associated with mutex, and only the owner can release the lock (mutex). Semaphore (or Binary Semaphore) is signaling mechanism (“I am done, you can carry on” kind of signal). A binary semaphore is NOT protecting a resource from access. Semaphores are more suitable for some synchronization problems like producer-consumer. Short version:\nMutex can be released only by the thread that had acquired it. Binary Semaphore can be signaled by any thread (or process). Read vs Write lock Read Lock – if no thread acquired the write lock or requested for it then multiple threads can acquire the read lock Write Lock – if no threads are reading or writing then only one thread can acquire the write lock\nConcurrency CyclicBarrier - CyclicBarriers are used in programs in which we have a fixed number of threads that must wait for each other to reach a common point before continuing execution. 
Phaser CountDownLatch Exchanger - share objects between two threads of type T Semaphore SynchronousQueue More Questions ShutdownNow vs Shutdown Dynamic Striping - Striped64 class Deadlock vs Livelock Lock vs Synchronized Block ReentrantReadWriteLock.ReadLock vs ReentrantReadWriteLock.WriteLock Stamped lock - optimistic locking DelayQueue SQL Start a Postgres DB\n1docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:9.6.10 2docker ps 3docker exec -it pg-container psql -U postgres -W postgres 4CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 5CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 6grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 7\\c test-db 8 9docker stop pg-container 10docker start pg-container Create the tables \u0026amp; Seed the test data\n1create table department 2( 3 id serial not null 4 constraint department_pk 5 primary key, 6 name varchar 7); 8 9alter table department owner to test; 10 11create table employee 12( 13 id serial not null 14 constraint employee_pk 15 primary key, 16 name varchar, 17 salary integer, 18 department_id integer, 19 manager integer, 20 dob date 21); 22 23alter table employee owner to test; 24 25create table project 26( 27 id serial not null 28 constraint project_pk 29 primary key, 30 name varchar 31); 32 33alter table project owner to test; 34 35create table employee_project_mapping 36( 37 id serial not null 38 constraint employee_project_mapping_pk 39 primary key, 40 emp_id integer 41 constraint fk1 42 references employee 43 on update cascade on delete cascade, 44 project_id integer 45 constraint fk2 46 references project 47 on update cascade on delete cascade 48); 49 50alter table employee_project_mapping owner to test; 51 52INSERT INTO department (id, name) VALUES (1, \u0026#39;IT\u0026#39;); 53INSERT INTO department (id, name) VALUES (2, \u0026#39;Sales\u0026#39;); 54INSERT INTO department 
(id, name) VALUES (3, \u0026#39;Admin\u0026#39;); 55 56INSERT INTO employee (id, name, salary, department_id, manager, dob) VALUES (1, \u0026#39;Joe\u0026#39;, 85000, 1, 5, \u0026#39;1990-02-10\u0026#39;); 57INSERT INTO employee (id, name, salary, department_id, manager, dob) VALUES (2, \u0026#39;Henry\u0026#39;, 80000, 2, null, \u0026#39;1975-02-10\u0026#39;); 58INSERT INTO employee (id, name, salary, department_id, manager, dob) VALUES (3, \u0026#39;Sam\u0026#39;, 60000, 2, 4, \u0026#39;1975-02-10\u0026#39;); 59INSERT INTO employee (id, name, salary, department_id, manager, dob) VALUES (4, \u0026#39;Max\u0026#39;, 90000, 1, 5, \u0026#39;1981-02-10\u0026#39;); 60INSERT INTO employee (id, name, salary, department_id, manager, dob) VALUES (5, \u0026#39;Janet\u0026#39;, 69000, 1, 1, \u0026#39;1983-02-10\u0026#39;); 61INSERT INTO employee (id, name, salary, department_id, manager, dob) VALUES (6, \u0026#39;Max\u0026#39;, 84000, 1, 1, \u0026#39;2005-02-10\u0026#39;); 62INSERT INTO employee (id, name, salary, department_id, manager, dob) VALUES (7, \u0026#39;Will\u0026#39;, 70000, 1, 1, \u0026#39;1982-02-10\u0026#39;); 63INSERT INTO employee (id, name, salary, department_id, manager, dob) VALUES (8, \u0026#39;Raj\u0026#39;, 65000, null, 1, \u0026#39;1978-02-10\u0026#39;); 64INSERT INTO employee (id, name, salary, department_id, manager, dob) VALUES (9, \u0026#39;Suresh\u0026#39;, 62000, null, 1, \u0026#39;1978-02-10\u0026#39;); 65INSERT INTO employee (id, name, salary, department_id, manager, dob) VALUES (10, \u0026#39;Sam\u0026#39;, 61000, 2, 1, \u0026#39;1985-02-10\u0026#39;); 66 67INSERT INTO project (id, name) VALUES (1, \u0026#39;Project 1\u0026#39;); 68INSERT INTO project (id, name) VALUES (2, \u0026#39;Project 2\u0026#39;); 69INSERT INTO project (id, name) VALUES (3, \u0026#39;Project 3\u0026#39;); 70INSERT INTO project (id, name) VALUES (4, \u0026#39;Project 4\u0026#39;); 71 72INSERT INTO employee_project_mapping (id, emp_id, project_id) VALUES (1, 1, 1); 
73INSERT INTO employee_project_mapping (id, emp_id, project_id) VALUES (2, 1, 2); 74INSERT INTO employee_project_mapping (id, emp_id, project_id) VALUES (3, 3, 3); 75INSERT INTO employee_project_mapping (id, emp_id, project_id) VALUES (4, 4, 3); 76INSERT INTO employee_project_mapping (id, emp_id, project_id) VALUES (5, 5, 2); 77INSERT INTO employee_project_mapping (id, emp_id, project_id) VALUES (6, 6, 1); 78INSERT INTO employee_project_mapping (id, emp_id, project_id) VALUES (7, 7, 2); 1--SQL Query PostgreSQL 2 3--first max salary 4select distinct salary 5from employee 6order by salary desc 7limit 1; 8 9-- https://leetcode.com/problems/second-highest-salary/ 10-- second max salary 11select distinct salary 12from employee 13order by salary desc 14limit 1 offset 1; 15 16-- second max salary 17select max(salary) 18from employee 19where salary \u0026lt; (select max(salary) from employee); 20 21-- if 2nd salary doesnt exist show null 22select NULLIF( 23 (select distinct salary 24 from employee 25 order by salary Desc 26 limit 1 offset 1), null 27 ) as SecondHighestSalary; 28 29-- https://leetcode.com/problems/department-top-three-salaries/ 30-- top 3 salaries in each department 31select d.name, e.id, e.name, e.salary 32from employee e, 33 department d 34where e.department_id = d.id 35 and ( 36 select count(distinct (e2.salary)) 37 from employee e2 38 where e2.salary \u0026gt; e.salary 39 and e2.department_id = e.department_id 40 ) \u0026lt; 3 41order by (d.name, e.name); 42 43--max salary in each department 44select d.name, max(e.salary) 45from employee e, 46 department d 47where e.department_id = d.id 48group by d.id; 49 50--max salary in each department with employee name 51select d.name, e.id, e.name, e.salary 52from employee e, 53 department d 54where e.department_id = d.id 55 and ( 56 select count(distinct (e2.salary)) 57 from employee e2 58 where e2.salary \u0026gt; e.salary 59 and e.department_id = e2.department_id 60 ) \u0026lt; 1 61order by (d.name, e.name); 
62 63--find all employee and department they work in, only show employees who have a department assigned. 64SELECT e.name, d.name 65FROM employee e 66 INNER JOIN department d ON e.department_id = d.id; 67 68--inner join 69SELECT e.name, d.name 70FROM employee e, 71 department d 72where e.department_id = d.id; 73 74--find all employee who dont have a department 75SELECT e.name, d.name 76FROM employee e 77 LEFT JOIN department d ON e.department_id = d.id 78where d.name is null; 79 80--find max salary in department even if no employee in it. 81SELECT d.name, max(e.salary) 82FROM employee e 83 RIGHT JOIN department d ON e.department_id = d.id 84GROUP BY d.name; 85 86--find department without an employee 87SELECT d.name, e.name 88FROM employee e 89 RIGHT JOIN department d ON e.department_id = d.id 90where e.name is null; 91 92--show all employee and department even if they dont have assignment 93SELECT e.name, d.name 94FROM employee e 95 FULL JOIN department d ON e.department_id = d.id; 96 97--find all employees having salary greater than average 98SELECT e.name, e.salary 99FROM employee e 100WHERE salary \u0026gt; (SELECT AVG(salary) from employee); 101 102-- find people with same name in same department 103SELECT e.name as emp_name, d.name as department 104FROM employee e, 105 department d 106where e.department_id = d.id 107group by emp_name, department 108having count(*) \u0026gt; 1; 109 110-- find all employees and their manager 111SELECT e.name, m.name 112FROM employee e, 113 employee m 114WHERE e.manager = m.id; 115 116-- find all employees who dont have a manager 117SELECT e.name, m.name 118FROM employee e 119 LEFT JOIN employee m on e.manager = m.id 120WHERE m.name is null; 121 122-- find all employees and their manager, if they dont have manager show null 123SELECT e.name, m.name 124FROM employee e 125 LEFT JOIN employee m on e.manager = m.id; 126 127-- find all employees and the projects they are working in along with department. 
128-- one employee can work on multiple projects 129select e.name, d.name department, p.name project 130from employee e, 131 department d, 132 employee_project_mapping m, 133 project p 134where e.department_id = d.id 135 and e.id = m.emp_id 136 and m.project_id = p.id; 137 138-- find employees who age is greater than 25 139select e.name, e.dob, age(CURRENT_DATE, e.dob) 140from employee e 141where EXTRACT(YEAR FROM age(CURRENT_DATE, e.dob)) \u0026gt; 25; 142 143-- find the oldest employee 144select e.id, e.name, max(age(CURRENT_DATE, e.dob)) emp_age 145from employee e group by e.id, e.name 146order by emp_age desc limit 1; 147 148--find the project and number of employees working on it. 149select p.name project, count(*) 150from project p, 151 employee_project_mapping m, 152 employee e 153where p.id = m.project_id 154 and m.emp_id = e.id 155group by project; 156 157--find the projects with no employees 158select p.id, p.name 159from project p 160 left join employee_project_mapping m on p.id = m.project_id 161where m.emp_id is null; 162 163--find the employees with no project 164select e.id, e.name 165from employee e 166 left join employee_project_mapping m on e.id = m.emp_id 167where m.project_id is null; 168 169--oldest person in each department 170select d.name, e.id, e.name, e.dob 171from employee e, 172 department d 173where e.department_id = d.id 174 and ( 175 select count(distinct(e2.dob)) 176 from employee e2 177 where e.dob \u0026gt; e2.dob 178 and e.department_id = e2.department_id 179 ) \u0026lt; 1 180order by (d.name, e.name); 181 182--current date 183SELECT CURRENT_DATE; 184SELECT extract(year from CURRENT_DATE) as \u0026#34;Year\u0026#34;; 185 186--find all employee born between 1980-1990 187select * 188from employee e 189where e.dob between \u0026#39;01-01-1980\u0026#39; and \u0026#39;01-01-1990\u0026#39;; 190 191--find all employees who name begins with M 192select * 193from employee 194where name like \u0026#39;M%\u0026#39;; 195 196--find all 
employees other than Max 197select * 198from employee 199where name \u0026lt;\u0026gt; \u0026#39;Max\u0026#39;; 200 201--find all employees with name of Max 202select * 203from employee 204where name = \u0026#39;Max\u0026#39;; 205 206--find employees who are in project1 and project2 207select e.id, e.name 208from employee e, 209 employee_project_mapping m, 210 project p 211where m.emp_id = e.id 212 and m.project_id = p.id 213 and p.name = \u0026#39;Project 1\u0026#39; 214INTERSECT 215select e.id, e.name 216from employee e, 217 employee_project_mapping m, 218 project p 219where m.emp_id = e.id 220 and m.project_id = p.id 221 and p.name = \u0026#39;Project 2\u0026#39;; Youtube Channels NeetCode\nTushar Roy - Coding Made Simple\nReferences https://www.youtube.com/c/NeetCode\nhttps://medium.com/interviewnoodle/grokking-leetcode-a-smarter-way-to-prepare-for-coding-interviews-e86d5c9fe4e1\nhttps://designgurus.org/course/grokking-the-coding-interview\nhttps://algs4.cs.princeton.edu/cheatsheet/\nhttps://www.bigocheatsheet.com/\nhttps://seanprashad.com/leetcode-patterns/\nhttps://www.teamblind.com/post/New-Year-Gift---Curated-List-of-Top-75-LeetCode-Questions-to-Save-Your-Time-OaM1orEU\nhttps://walkccc.me/LeetCode/preface/\nhttps://neetcode.io/\n","link":"https://gitorko.github.io/post/grokking-the-coding-interview/","section":"post","tags":["coding","interview","algorithms","data-structure"],"title":"Grokking the Coding Interview"},{"body":"","link":"https://gitorko.github.io/tags/interview/","section":"tags","tags":null,"title":"Interview"},{"body":"","link":"https://gitorko.github.io/categories/leetcode/","section":"categories","tags":null,"title":"Leetcode"},{"body":"System Design We start with requirement gathering\n1. Functional requirements What is the start \u0026amp; end result/state? Is this is a live service or a background service? Does this need stream processing or batch processing? How many users? Does the data need to be persisted? 
Does the data need to be consistent? Does the service need external API? Does the service need role based authentication/authorization? 2. Non-Functional requirements What is the latency expected? What is the fault tolerance criteria? What is load the system needs to handle and scale requirements? What observability \u0026amp; monitoring is needed by the system? What is the uptime \u0026amp; availability SLA? What security aspects need to be addressed? Does it need auditing \u0026amp; reporting? 3. Capacity planning How many servers would you need? How many users (load) are going to access the service? How much storage is required? What network bandwidth is required? What latency can be tolerated? Do you need GPU specific processors or CPU specific processors? What time of the day do you need the servers? What is the budget and expenses related to the servers? Back Of Envelope estimation\nLoad Estimation\nHow many requests per second need to be handled?\nType Count Description Average users per day 10^6 1 million Average requests per user 10 Average total requests per day 10^7 10 million requests Average total requests per sec 100 Storage Estimation\nHow much storage is needed for 5 year?\nType Count Description Average Total requests per day 10^7 10 million requests Average size of request per user 2 MB Average size of request per day 20^7 MB 20 TB Average size of request for 5 year day 36 PB Bandwidth Estimation\nHow much network bandwidth is needed?\nType Count Description Average size of request per day 20^7 MB 20 TB Average size of request per sec 230 MB/Sec Latency Estimation\nWhat latency is acceptable?\nType Count Description Sequential Latency 100 ms Sum of latency of all sources Parallel Latency 75 ms Max of latency of all sources Resource Estimation\nHow many CPU core/servers are needed?\nType Count Description Average total requests per sec 100 req/sec Average cpu processing time per request 100 ms/req Average cpu processing time per sec 10^6 ms/sec 
Average 1 cpu core processing per sec 10^5 ms/sec Average number of cpu core 10 High Level Design (HLD) High Level Design (HLD) often is very open-ended and broad. It's a 30,000 foot view of the system that covers what the various components of the systems and how they interact with each other. The objective here is to come up with various sub-systems and modules and identify how they will interact with each other. Eg: Design Food Delivery App, Design Uber, Design Twitter.\nComponent diagrams Sequence diagrams Use-cases API Interaction Low Level Design (LLD) Low Level Design (LLD) involves picking a specific module/sub-system from the HLD and doing a deep dive into its implementations. The problem statement is more detailed and outcome is clear. Eg: Design the order acceptance system for food delivery app that can cater to 7-10K requests per hour.\nEntity Relationship diagrams Decision tree/Flow chart Class diagrams Swim-lane diagrams Fundamentals 1. Short-Polling vs Long-Polling vs SSE (Server Sent Events) vs Websocket Short-Polling - Client continuously asks the server for new data. Long-Polling - Client continuously asks the server for new data, but server waits for a few seconds and if data becomes available by then it will return the data. Websocket - HTTP connection is upgraded to bidirectional connection. Server Sent Events - HTTP connection is kept open by the server and data is pushed to client continuously over it. Websocket Server Sent Event Long-Poll Type Of Channel Full-duplex,Bidirectional Half-duplex,Unidirectional Half-duplex,Unidirectional Type of Client Server Push \u0026amp; Client Send Server Push Client Pull Type of Data Text + Binary Text Text + Binary Connection Limit 65,536 (max number of TCP ports) 6-8 parallel per domain Based on threads available https://youtu.be/ZBM28ZPlin8\n2. Fork Join Pool Fork Join is suited for tasks that create sub-tasks. Fork/Join framework uses work-stealing algorithm. 
Work stealing is a scheduling strategy where worker threads that have finished their own tasks can steal pending tasks from other threads. Uses a deque (double ended queue), main thread picks task from the front of the queue, other threads steal tasks from the back of the queue.\n1 2@RequiredArgsConstructor 3class FibForkJoin extends RecursiveTask\u0026lt;Integer\u0026gt; { 4 final int n; 5 6 @Override 7 protected Integer compute() { 8 System.out.println(\u0026#34;Current Thread: \u0026#34; + Thread.currentThread().getName() + \u0026#34; n = \u0026#34; + n); 9 if (n \u0026lt;= 1) { 10 return n; 11 } 12 FibForkJoin f1 = new FibForkJoin(n - 1); 13 f1.fork(); 14 FibForkJoin f2 = new FibForkJoin(n - 2); 15 f2.fork(); 16 return f1.join() + f2.join(); 17 } 18} https://youtu.be/5wgZYyvIVJk\n3. Distributed Coordination For distributed systems, achieving coordination and consistency despite unreliable communication requires following protocols\nTwo phase (prepare \u0026amp; commit) - Blocking protocol as it waits for the prepare-ack for prepare phase. Three phase commit (prepare, pre-commit \u0026amp; commit) - Non-Blocking protocol as first phase gathers votes and only the second phase blocks with timeout. Consensus Algorithms (e.g., Paxos, Raft) https://youtu.be/jGJT1FRYGcY\nhttps://youtu.be/S4FnmSeRpAY\n4. Saga Pattern Try to avoid distributed transactions. As it makes the system complex to manage.\nA sequence of transactions that updates each service and publishes a message or event to trigger the next transaction step. Each local transaction updates the database and publishes a message/event to trigger the next local transaction in another service. If a local transaction fails then the saga executes a series of compensating transactions that undo the changes that were made by the preceding transactions.\nThe 2 approaches\nChoreography - Each local transaction publishes domain events that trigger local transactions in other services. 
Orchestration - An orchestrator tells the participants what local transactions to execute. Problems with saga\nHard to debug \u0026amp; test. Risk of cyclic dependency between saga participants. 5. Locking \u0026amp; Transaction Isolation Locking ensures that the row is not concurrently updated by 2 different threads which might corrupt the data.\nProblem:\nThread A: Reads row with amount 100$ in Transaction T1 Thread B: Reads row with amount 100$ in Transaction T2 Thread A: Adds 10$, new amount is 110$ Thread B: Adds 10$, new amount is still 110$ instead of 120$.\nSolution 1 (Optimistic Locking):\nThread A: Reads row with amount 100$ in Transaction T1 Thread B: Reads row with amount 100$ in Transaction T2 Thread A: Adds 10$, new amount is 110$ Thread B: Adds 10$ and tries to save but sees that the record is not the same record that it read. So fails \u0026amp; does retry.\nSolution 2 (Pessimistic Locking):\nThread A: Reads row with amount 100$ in Transaction T1, it holds a row level lock. Thread B: Reads row in Transaction T2 but is blocked as T1 holds a lock, So it waits till timeout happens \u0026amp; retry. Thread A: Adds 10$, new amount is 110$ Thread B: Reads row with updated amount 110$ and updates to 120$\nTypes of locking\nPessimistic Locking - Locks held at row level or table level. Not ideal of high performance \u0026amp; cant scale. Optimistic Locking - Version field is added to the table, JPA ensures that version check is done before saving data, if the version has changed then update will throw Error. Ideal for high performance \u0026amp; can scale. Pessimistic locking\nLockModeType.PESSIMISTIC_READ - Rows are locked and can be read by other transactions, but they cannot be deleted or modified. PESSIMISTIC_READ guarantees repeatable reads. LockModeType.PESSIMISTIC_WRITE - Rows are locked and cannot be read, modified or deleted by other transactions. For PESSIMISTIC_WRITE no phantom reads can occur and access to data must be serialized. 
LockModeType.PESSIMISTIC_FORCE_INCREMENT - Rows are locked and cannot be read, modified or deleted by other transactions. it forces an increment of the version attribute Lock the row being read to avoid the same row from being updated by 2 different transactions\nselect * from table FOR SHARE - This clause locks the selected rows for read, other threads can read but cant modify. select * from table FOR UPDATE - This clause locks the selected rows for update. This prevents other transactions from reading/modifying these rows until the current transaction is completed (committed or rolled back) select * from table FOR UPDATE SKIP LOCKED clause - This clause tells the database to skip rows that are already locked by another transaction. Instead of waiting for the lock to be released\nOptimistic locking\nLockModeType.OPTIMISTIC - Checks the version attribute of the entity before committing the transaction to ensure no other transaction has modified the entity. LockModeType.OPTIMISTIC_FORCE_INCREMENT - Forces a version increment of the entity, even if the entity has not been modified during the update. Transaction Isolation\nTransaction isolation levels in JPA define the degree to which the operations within a transaction are isolated from the operations in other concurrent transactions JPA, typically using the underlying database and JDBC settings\nIsolation.READ_UNCOMMITTED Read Uncommitted - The lowest level of isolation. Transactions can read uncommitted changes made by other transactions. Isolation.READ_COMMITTED Read Committed - Transactions can only read committed changes made by other transactions. Isolation.REPEATABLE_READ Repeatable Read - If a transaction reads a row, it will get the same data if it reads the row again within the same transaction. Isolation.SERIALIZABLE Serializable - The highest level of isolation. Transactions are completely isolated from one another. Data Consistency\nDirty reads: read UNCOMMITED data from another transaction. 
Non-repeatable reads: read COMMITTED data from an UPDATE query from another transaction. Phantom reads: read COMMITTED data from an INSERT or DELETE query from another transaction. Dirty Read\nNAME AGE Bob 35 TRANSACTION T1 TRANSACTION T2 select age from table where name = 'Bob'; (35) update table set age = 40 where name = 'Bob'; select age from table where name = 'Bob'; (40) commit; Non-Repeatable Read\nNAME AGE Bob 35 TRANSACTION T1 TRANSACTION T2 select age from table where name = 'Bob'; (35) update table set age = 40 where name = 'Bob'; commit; select age from table where name = 'Bob'; (40) Phantom Read\nNAME AGE Bob 35 TRANSACTION T1 TRANSACTION T2 select count(*) from table where age = 35; (1) insert into table values ('jack', 35); commit; select count(*) from table where age = 35; (2) Behaviour of Isolation Levels\nIsolation Level Dirty Non-Repeatable Reads Phantom Reads Read Uncommitted Yes Yes Yes Read Committed No Yes Yes Repeatable Read No No Yes Serializable No No No 1spring: 2 jpa: 3 properties: 4 hibernate: 5 connection: 6 isolation: 2 1@Transactional(isolation = Isolation.SERIALIZABLE) 1SHOW default_transaction_isolation; Transaction Propagation\nWhen one transaction function calls another in the same class boundary then the parent transaction level is applied. You need to move the function to a different public class if you want its transaction to be enforced. When nested calls happen on transaction boundary then the transaction is suspended.\n@Transactional(readOnly = true) - transaction is readonly and no updates can happen. @Transactional(propagation = Propagation.REQUIRES_NEW) - creates a new transaction. @Transactional(propagation = Propagation.REQUIRED) - default, spring will create a new transaction if not present. @Transactional(propagation = Propagation.MANDATORY) - will throw exception if transaction doesn't exist. 
@Transactional(propagation = Propagation.SUPPORTS) - if existing transaction present then it will be used, else operation will happen without any transaction. @Transactional(propagation = Propagation.NOT_SUPPORTED) - operation will run with no transaction. @Transactional(propagation = Propagation.NEVER) - will throw an exception if transaction present. You can define which exceptions cause the rollback and which don't.\n1@Transactional(noRollbackFor = {CustomException.class}, rollbackFor = {RuntimeException.class}) To track transactions\n1logging: 2 level: 3 root: info 4 org.springframework.orm.jpa.JpaTransactionManager: DEBUG Spring keeps the transaction open till the controller returns the response. This is because it thinks that the object may be accessed later in the HTML (web mvc templates). We don't use this, so we will set the below property to false that way transaction is closed after @Transactional function ends.\n1spring: 2 jpa: 3 open-in-view: false By setting auto-commit to false spring won't commit immediately but will commit when the transaction ends.\n1spring: 2 datasource: 3 hikari: 4 auto-commit: false You can also use TransactionTemplate to control transactions if you don't want to use @Transactional and want more control. Try to keep the transaction boundary small. External calls need to be done outside the transaction context.\n1transactionTemplate.executeWithoutResult() 2transactionTemplate.execute() 6. Indexing Database indexes help improve the speed and efficiency of querying data in a database\nClustered index - A special type of index that reorders the way records in the table are physically stored. Therefore, table can have only one clustered index. The leaf nodes of a clustered index contain the data pages. eg: primary key Non-clustered index - A special type of index in which the logical order of the index does not match the physical stored order of the rows on disk. 
The leaf node of a non-clustered index does not consist of the data pages. Instead, the leaf nodes contain index rows. eg: unique constraints Clustered vs Non-Clustered Index\nClustered Index Non-Clustered Index Faster Slower Requires less memory Requires more memory Index leaf node is the main data Index leaf node is pointer to data Table can have only one clustered index Table can have multiple non-clustered index Each new index will increase the time it takes to write new records. The where clause should have columns which are indexed for the right index to be used. The like clause doesn't use index column because it's a match query. If you want to explicitly use certain index you can use hints. The db query executor can choose to use it but there are no guarantees. To explain the plan\n1EXPLAIN SELECT * FROM table; To execute and explain the plan\n1EXPLAIN ANALYZE SELECT * FROM table; Tradeoff\nStorage Space: Indexes consume additional storage space, as they create and maintain separate data structures alongside the original table data. Write Performance: When data is inserted, updated, or deleted in a table, the associated indexes must also be updated, which can slow down write operations. https://youtu.be/-qNSXK7s7_w\n7. Vertical Scaling vs Horizontal Scaling Vertical scaling you increase/add resources like more memory, cpu etc. Horizontal scaling you add more servers. 8. Datacenter Overview of a datacenter layout\nEach Availability zone is plugged to a different power supply, cooling, networking. Availability Set each set is on a separate server rack (fault domain). Failure affects only few racks. Paired region allows replication across multiple region. eg: Zone1 of RegionA is paired with Zone1 of RegionB Grouping of Availability Set:\nFault domain - Grouping of servers based on rack (power, network input). Update domain - Grouping of servers based on which group can be powered on/off. Outages:\nIf there is a wire cut in the rack availability set is down. 
If there is a fire in one-floor/one-building of the datacenter then one zone is down, other floors/building in same region are isolated hence remain operational. If there is a hurricane in the region cutting all power then all zones in that region are down. 9. Distributed System \u0026amp; Microservices Distributed system\nCharacteristics of distributed system\nFailure - Always assume that things will fail and plan for it. Eg: Network failures, Disk failures Circuit Breaker Pattern - Instead of throwing error page handle service down gracefully. Service Discovery - All services register themselves making it easy to lookup services. Observability - System is actively monitored. Fail-Over - Stand by server go live when primary servers dies. Throughput - The number of requests the system can process. Latency - Time taken to process the requests. Rate Limit - Restrict overuse of services by single or many users. Caching - Caching speeds up lookup however can bring in-consistency among caches. Bulk head pattern - Failure in one system should not bring down the whole system. Timeout - Ensure proper connection timeouts are set so that slow downstream service cant impact upstream service. Fail-fast - Prefer to fail fast than deal with slow latency, as it can cascade the effect in upstream services. Fault Tolerance - Ability to deal with failure in system. eg: Chaos Monkey - Randomly turn off systems to ensure system is fault-tolerant. Retry - Systems can come up or go down, have ability to retry once it recovers. Data durability \u0026amp; Consistency - failure rates of storage, corruption rate in read-write process Replication - backing up data, active replication vs passive replication. High-Availability - If downtime are not acceptable then system should always be up. Trade-Offs - Every choice comes with its shortcoming be aware of it. Scaling - System should be able to cope with increased and decreased load. 
Microservices\nMicroservices is an architectural style where applications are structured as a collection of small, loosely-coupled, and independently deployable services. Each microservice is responsible for a specific piece of functionality within the application \u0026amp; communicates with other microservices through well-defined APIs\nCharacteristics of microservices\nSingle Responsibility: Specific functionality, Makes the services easier to understand, develop, and maintain. Independence: Develop, deploy, and scale services independently of one another. Loose coupling. Decentralized: Each service owns its data and business logic. Each service has its own database. Communication: Communicate with each other using lightweight protocols such as HTTP/REST, gRPC, or message queues. Fault Tolerance: Failure in one service does not necessarily cause the entire system to fail. Improves resiliency. 10. Caching Points Advantages of Caching\nImproves performance of application Reduces latency Reduces load on the DB Reduces network cost Increases Read Throughput. However, it does come with its own cost/problems like cache invalidation, stale data, high churn if TTL (time-to-live) is set wrong, thundering herd etc. A distributed cache (read-write) comes with problems of its own like consistency, node affinity etc.\nDifferent places to cache\nClient side caching - When the client or browser can cache some data to avoid the external call. Server side caching - Each server can cache some data locally. Global/Distributed caching - A centralized server/service to cache data. Proxy/Gateway side caching - Proxy or gateway servers cache some data so the request can be returned immediately without reaching out to backend. 11. Types of Cache Spatial cache - Locality based cache, bring all nearby associated data from disk to cache. Eg: If fetching user profile also load user rating to cache. Temporal cache - Cache stores elements that are frequently used. 
Eg: LRU Distributed cache - Cache spread across many nodes, keeping cache in sync with store is important. https://youtu.be/ccemOqDrc2I\n12. Caching HashMap vs Cache\nDisadvantage of using hashmap over cache is that hashmap can cause memory overflow without eviction \u0026amp; doesn't support write to disk.\nEhcache will only evict elements when putting elements and your cache is above threshold. Otherwise, accessing those expired elements will result in them being expired (and removed from the Cache). There is no thread that collects and removes expired elements from the Cache in the background.\nTypes of store\nOn-Heap Store - stores cache entries in Java heap memory Off-Heap Store - primary memory (RAM) to store cache entries, cache entries will be moved to the on-heap memory automatically before they can be used. Disk Store - uses a hard disk to store cache entries. SSD type disk would perform better. Clustered Store - stores cache entries on the remote server Memory areas supported by Ehcache\nOn-Heap Store: Uses the Java heap memory to store cache entries and shares the memory with the application. The cache is also scanned by the garbage collection. This memory is very fast, but also very limited. Off-Heap Store: Uses the RAM to store cache entries. This memory is not subject to garbage collection. Still quite fast memory, but slower than the on-heap memory, because the cache entries have to be moved to the on-heap memory before they can be used. Disk Store: Uses the hard disk to store cache entries. Much slower than RAM. It is recommended to use a dedicated SSD that is only used for caching. Caching Strategies\nRead heavy caching strategies\nRead-Cache-aside - Application queries the cache. If the data is found, it returns the data directly. If not it fetches the data from the SoR, stores it into the cache, and then returns. Read-Through - Application queries the cache, cache service queries the SoR if not present and updates the cache and returns. 
Write heavy caching strategies\nWrite-Around - Application writes to db and to the cache. Write-Behind / Write-Back - Application writes to cache. Cache is pushed to SoR after some delay periodically. Write-through - Application writes to cache, cache service immediately writes to SoR. 13. Cache Eviction Policies FIFO (First In First Out) - replaces first element that was added to the cache. eg: queue LIFO (Last In First Out) - replaces the last element that was added to the cache. eg: stack LRU (Least Recently Used) - replaces element that has not been used for the longest time. eg: frequently accessed item based on timestamp remain in cache MRU (Most Recently Used) - replaces most recently used elements. LFU (Least Frequently Used) - replaces least frequently used elements based on count. eg: frequently accessed item based on count remain in cache RR (Random Replacement) - replaces elements randomly. 14. Virtual Thread JDK21 Virtual threads aim to improve the concurrency model in Java by introducing lightweight, user-mode threads that can efficiently handle a large number of concurrent tasks.\nIf your code calls a blocking I/O operation in a virtual thread, the runtime suspends the virtual thread until it can be resumed later. The hardware is utilized to an almost optimal level, resulting in high levels of concurrency and, therefore, high throughput.\nPitfalls to avoid in Virtual Threads\nExceptions - Stack traces are separate, and any Exception thrown in a virtual thread only includes its own stack frames. Thread-local - Reduce usage as each thread will end up creating its own thread local unlike before where there are limited threads in pool, virtual threads can be many as they are cheap to create. Synchronized blocks/methods - When there is synchronized method or block used the virtual thread is pinned to a platform thread, it will not relinquish its control. 
This means it will hold the platform thread which can cause performance issues if there is IO happening inside the synchronized block. Use ReentrantLock instead of synchronized. Native code - When native code is used virtual threads get pinned to platform threads, it will not relinquish its control. This may be problematic if IO happens for longer time thereby blocking/holding the platform thread. Thread pools - Avoid thread pool to limit resource access, eg: A thread pool of size 10 can create more than 10 concurrent threads due to virtual threads hence use semaphore if you want to limit concurrent requests based on pool size. Spring - In Spring context use concurrency-limit to limit number of thread pool and avoid runaway of virtual threads. Performance - Platform threads are better when CPU intensive tasks are executed compared to virtual threads. Virtual threads benefit only when there is IO. Context switching - When virtual threads have blocking operation they yield and JVM moves the stack to heap memory. The stack is put back only when its time to execute the thread again. This is still cheaper than creating a new platform thread though. 1Runnable fn = () -\u0026gt; { 2 System.out.println(\u0026#34;Running in thread: \u0026#34; + Thread.currentThread().getName()); 3}; 4 5Thread.ofVirtual().name(\u0026#34;virtual-thread-1\u0026#34;).start(fn); 6Thread.ofPlatform().name(\u0026#34;platform-thread-1\u0026#34;).start(fn); 7 8new Thread(fn, \u0026#34;platform-thread-2\u0026#34;).start(); 9 10var executors = Executors.newVirtualThreadPerTaskExecutor(); 11executors.submit(() -\u0026gt; { 12 System.out.println(\u0026#34;Running in thread: \u0026#34; + Thread.currentThread().threadId()); 13}); 15. High Availability Deployment Active-Active - Two nodes of the service running in parallel, loadbalancer will route traffic to both. Active-Passive - The primary and secondary service running in parallel, with primary serving all the requests. 
If primary fails loadbalancer will route traffic to secondary and designate it as primary. 16. CDN \u0026amp; Edge Servers Content Delivery Network (CDN) reduce latency by keeping static content closer to the customer regions. Always remember to version your static content like css, images etc to help CDN differentiate between versions.\nPush CDN - Developer pushes the static content to CDN Pull CDN - First request pulls the static content to the CDN Edge Servers run compute operations closer to the customer region, eg: Streaming, Gaming etc.\n17. Message Broadcast Protocols Tell Everyone Everything - Too much traffic noise. Gossip - No guarantee that message has reached everyone. Distributed Cache - External entity required. Co-ordination service - Co-ordination appoints one person to inform everyone. Leader Election - Service appoints leaders in the group whose job is to inform them of any changes. 18. Kafka Kafka is a distributed \u0026amp; fault-tolerant, high throughput, scalable stream processing \u0026amp; messaging system.\nKafka as publisher-subscriber messaging system. Kafka as queue (point-point) messaging system. Kafka as stream processing system that reacts to event in realtime. Kafka as a store for data. Terms\nBroker: Kafka server. Cluster: A group of kafka brokers. Topic: Logical grouping of messages. Partition: A topic can contain many partitions. Messages are stored in a partition. Offset: Used to keep track of message. Consumer Group: Reads the messages from a topic. Consumer: A consumer group can have N consumers, each will read a partition. Consumers cant be more than number of partitions. Zookeeper: Used to track the offset, consumers, topics etc. Order is guaranteed only withing a partition and not across partitions. Within a consumer group a partition can be read only by one consumer. Leader replicates partition to other replica servers based on replication count. If leader fails then follower will become leader. 
Zookeeper manages all brokers, keeps track of offset, consumer group, topic, partitions etc. Once a message acknowledgement fails kafka will retry and even after certain retries if it fails, the message will be moved to dead letter. Kafka provides high throughput because of the following\nKafka scales because it works on append only mode, sequential disk write is faster than random access file write Kafka copies data from disk to network by ready with zero copy. OS buffer directly copies to NIC buffer. There is no set limit to the number of topics that can exist in a Kafka cluster, each partition has a limit of 4000 partitions per broker, maximum 200,000 partitions per Kafka cluster\nKafka Use-Cases\nActivity tracking for high traffic website Processing streaming big data Monitoring financial data in real time IoT sensor data processing https://gitorko.github.io/post/spring-apache-kafka/\nhttps://youtu.be/Cie5v59mrTg\nhttps://youtu.be/UNUz1-msbOM\n19. RabbitMQ RabbitMQ is a distributed message-broker that support various message protocols.\nAMQP (Advanced Message Queuing Protocol) STOMP (Streaming Text Oriented Messaging Protocol) MQTT (MQ Telemetry Transport) Models of communication\nQueue - Message published once \u0026amp; consumed once. Pub-Sub - Message published once consumed many times Retry Mechanism\nAuto-Ack - Broker will delete message after delivering it to consumer. Doesn't wait till consumer processes it. Manual-Ack - Broker will delete message only after consumer acknowledges processing it. After certain retry if it still fails then rejected messages will move to dead letter queue.\nRabbitMQ Distributed Setup\nCluster - Exchanges replicate to all servers. , all nodes need same version. Support bi-direction. Federation - Exchange on one broker publishes to an exchange on another. Many brokers on different version. Supports both uni and bi direction. Shovel plugin - similar to federation but works at low level. 
Difference\nCluster Federation Single logical broker Many brokers All nodes on same version All nodes on different version Bi-Direction topology Uni-Direction or Bi-Direction topology CP System (CAP) AP System (CAP) RabbitMQ vs Kafka\nRabbitMQ Kafka Push model Pull model Consumed event deleted, Less storage All events stored, More storage required Queues are single threaded Can scale based on consumer groups Smart broker (routing key) \u0026amp; Dumb Consumer Dumb broker \u0026amp; Smart Consumer (partition aware) No events replay Events can be read from any point Ordering guaranteed Ordering guaranteed only within partition https://www.upsolver.com/blog/kafka-versus-rabbitmq-architecture-performance-use-case\nhttps://tanzu.vmware.com/developer/blog/understanding-the-differences-between-rabbitmq-vs-kafka/\nhttps://youtu.be/O1PgqUqZKTA\n20. Redis Redis is an in-memory data store. Reading/writing to RAM is always faster than disk, hence it has high throughput and low latency. Redis employs a single-threaded architecture. Redis supports Non-blocking IO. Redis can deliver up to 1 million requests per second when run on an average Linux system.\nLimitation is that dataset cant be larger than memory (RAM)\nSince redis is single threaded there is no need for lock, no need for thread synchronization, no context switching, no time spent to create or destroy threads. It doesn't need multi thread because it uses I/O multiplexing where a single thread can wait on many socket connections for read/write. Redis cluster can be scaled even more with sharding.\nDatastructures supported\nString - (SDS, simple dynamic string) BitMap BitField Hash - (Hash Table, Zip List) List - (Link List, Zip List) Set - (Hash Table, IntSet) Sorted Set - (Skip List) Geospatial Hyperlog Stream Redis Persistence\nRDB (Redis Database): Performs point-in-time snapshots of your dataset at specified intervals. AOF (Append Only File): Logs every write operation received by the server. 
These operations can then be replayed again at server startup, reconstructing the original dataset. No persistence: persistence disabled. RDB + AOF: Combine both AOF and RDB. https://redis.io/docs/management/persistence/\nRedis Use-Cases\nCaching Session store Gaming leaderboards (SortedSet) Rate limiting (INCR - Counter \u0026amp; Setting TTL) Distributed Lock (SETNX - SET if Not exists) https://youtu.be/5TRFpFBccQM\n21. Stream processing vs Message processing Message Processing Stream Processing Messages are removed from queue after processing Append only log which can be processed from any point again No concept of windowing Data within a window matters, window can be 1 day, 1 year etc Push based Pull based Waits for ACK on delivery after push No need to wait for ACK as its pull based Slow consumer can lead to build up of queue Data is written to logs and read from logs Order not guaranteed Order guaranteed (within log partition) No downstream adapters Adapters provide options to route to other downstream endpoints eg: database https://blog.rabbitmq.com/posts/2021/07/rabbitmq-streams-overview\n22. JVM Memory \u0026amp; Garbage collectors In java memory allocated to program is cleaned up and recovered by the garbage collector. If this doesn't happen then your program will run out of memory space to execute. Garbage collection provides automation memory management in java. Objects are created on the heap in java.\nObjects get created on the heap.\nLive - Objects are being used and referenced from somewhere else Dead - Objects are no longer used or referenced from anywhere All objects are linked to a Garbage Root Object via graph. 
Garbage collector traverses the whole object graph in memory, starting from root and following references from the roots to other objects.\nPhases of Garbage Collection:\nMark - GC identifies the unused objects in memory Sweep - GC removes the objects identified during the previous phase Compact - Compacts fragmented space so that objects are in contiguous block Garbage Collections is done automatically by the JVM at regular intervals. It can also be triggered by calling System.gc(), but the execution is not guaranteed.\nGenerational garbage collection strategy that categorizes objects by age and moves them to different region.\nJVM is divided into three sections\nYoung Generation Old Generation Permanent Generation Young Generation\nNewly created objects start in the Young Generation. When objects are garbage collected from the Young Generation, it is a minor garbage collection event. When surviving objects reach a certain threshold of moving around the survivor spaces, they are moved to the Old Generation. Use the -Xmn flag to set the size of the Young Generation\nThe Young Generation is further subdivided\nEden space - All new objects start here, and initial memory is allocated to them Survivor spaces - Objects are moved here from Eden after surviving one garbage collection cycle. Old Generation\nObjects that are long-lived are eventually moved from the Young Generation to the Old Generation When objects are garbage collected from the Old Generation, it is a major garbage collection event.\nUse the -Xms and -Xmx flags to set the size of the initial and maximum size of the Heap memory.\nPermanent Generation\nDeprecated since java 8 Metadata of classes and methods are stored in perm-gen.\nMetaSpace\nStarting with Java 8, the MetaSpace memory space replaces the PermGen space. Metaspace is automatically resized hence applications won't run out of memory if the classes are big.\nPhases of GC\nMinor GC - Happens on Young generation. 
Major GC - Happens on Old generation. Stop-the-world event, program pauses till memory is cleaned. Least pause time is always preferred. Algorithms\nMark-Copy - Happens in Young generation Marks all live objects Then copies from eden space to survivor space (S1/S2), At any given point either S1 or S2 is always empty. Then entire eden space is treated as empty. Mark-Sweep-Compact - Happens in Old generation. Marks all live objects. Sweep/Reclaim all dead object. Releases memory Compaction - Move all live objects to left so that they are next to each other in a continuous block. Types of garbage collector:\n-XX:+UseSerialGC - Serial garbage collector. Single thread for both minor \u0026amp; major gc. -XX:+UseParallelGC - Parallel garbage collector. Multiple thread for both minor gc \u0026amp; single/multiple thread for major gc. Doesn't run concurrently with application. The pause time is longest. eg: Batch jobs -XX:+UseConcMarkSweepGC - CMS (Concurrent Mark \u0026amp; Sweep) Deprecated since java 9. Multiple thread for both minor \u0026amp; major gc. Concurrent Mark \u0026amp; Sweep. Runs concurrently with application to mark live objects. The pause time is minimal. eg: CPU intensive. -XX:+UseG1GC - G1 (Garbage first) garbage collector. Entire heap is divided to multiple regions that can be resized. A region can be either young or old. Identifies the regions with the most garbage and performs garbage collection on that region first, it is called Garbage First The pause time is predictable as regions are small. -XX:+UseEpsilonGC - Epsilon collector - Do nothing collector. JVM shuts down once heap is full. Used for zero pause time application provided memory is planned. -XX:+UseShenandoahGC - Shenandoah collector - Similar to G1, but runs concurrently with application. CPU intensive. -XX:+UseZGC - ZGC collector - Suitable for low pause time (2 ms pauses) and large heap. GC performed while application running. 
-XX:+UseZGC -XX:+ZGenerational Generation ZGC - ZGC splits the heap into two logical generations: one for recently allocated objects and another for long-lived objects. The GC can focus on collecting younger and more promising objects more often without increasing pause time, keeping them under 1 millisecond Garbage Collectors When to use Serial Small data sets (~100 MB max)Limited resources (e.g., single core)Low pause times Parallel Peak performance on multi-core systemsWell suited for high computational loads more than 1-second pauses are acceptable G1 /CMS Response time \u0026gt; throughputLarge heapPauses \u0026lt; 1 sec Shenandoah Minimize pause timesPredicatable latencies ZGC Response time is high-priority, and/orVery large heap Epsilon GC Performance testing and troubleshooting https://www.youtube.com/watch?v=2AZ0KKeXJSo\nhttps://www.youtube.com/watch?v=XXOaCV5xm9s\n23. Proxy vs Reverse-Proxy Proxy or forward proxy - Takes the connection out. Client is hidden. Reverse proxy - Brings the connection in. Server is hidden. 24. Load Balancer Software based eg: Nginx Hardware based eg: F5 Load balancer distributes traffic across multiple nodes ensuring high availability. Always create health check url that can determine if node is healthy or not, based on this the load balancer decides if the node is up or down.\nL3 - IP Based L4 - DNS Based L7 - Application Based Sticky sessions - Will assign the same user request to the same node in order to maintain the session state on the node. Ideally sticky session should be avoided, if the node goes down few users will experience outage. However in some cases sticky session will be easy to configure and setup.\n25. Load Balancer Routing Algorithms Round-robin - traffic distributed in round-robin fashion. Weighted Round-robin - traffic distributed by weight, some servers may be able to process more load hence their weight is more compared to smaller configuration machines. 
Least Connections - traffic is sent to server with the fewest current connections to clients. Least Response Time - traffic is sent to server with the fastest response time. Least Bandwidth - traffic is sent to server with the least Mbps of traffic. Hashing - traffic is sent to server based on a hash key. eg: client IP address hash, request URL hash. 26. NoSQL vs Relational DB NoSQL RDBMS Non-Relational DB Relational DB No predefined schema, handles un-structured data Require a schema, handles structured data Can scale across machines Can't scale across machines easily BASE Principle of eventual consistency ACID properties Structure can be Key-Value pairs, Document, Graph, Wide column Structure is Table based 27. CQRS - Command and Query Responsibility Segregation Pattern that separates read and update operations for a data store. Implementing CQRS in your application can maximize its performance, scalability, and security\n28. HTTP1 vs HTTP1.1 vs HTTP2 vs HTTP3 Protocol HTTP1 - one tcp connection per request HTTP1.1 - one tcp connection per request, keep alive connection so connection is not closed immediately. HTTP2 - one tcp connection for all requests. Multiplex all requests on one TCP. Server Push where the server proactively pushes css,js all on one TCP when the client requests the html file. HTTP3 - Uses QUIC protocol (based on UDP). Eg: Mobile that is changing cell towers, UDP continues to stream data without a new TCP handshake with the new tower. https://youtu.be/a-sBfyiXysI\n29. HTTPS \u0026amp; TLS Handshake Asymmetric encryption vs symmetric encryption\n30. Thundering Herd Problem When a large number of requests hit a server and cache doesn't contain the required data, a lot of time is spent going back and forth to update the cache, this can overwhelm the backend causing an outage which is called the thundering herd problem. To address this issue fail fast and update the cache after some random delay.\n31. 
Tsunami Traffic A streaming service is hosting a sports event. Millions of users suddenly login to watch the game as the game reaches the end causes a sudden surge in traffic.\nScaling up services takes time, Keep few services on standby if you anticipate heavy traffic. Configure auto-scaling based on key parameters. Scale on concurrent requests \u0026amp; not on CPU or memory usage. Design for scale down models as well along with scale up. Identify breaking point for each system. Plan for service denial via circuit breakers for new customers instead of system wide outage for all customers. 32. Serverless / FAAS (Function As A Service) Function is exposed as a service. Cost optimal, pay for only what you use. 33. Bloom filter Bloom filter is a probabilistic algorithm. Determines if given element is present in a set or not (member of set). In some cases it can give false positive, but will never give a false negative. More hash functions you use lesser the collisions, wider the bit array lesser the collisions. It is space efficient as it uses less memory.\nTo determine 'Member of set' No false negative but can give false positive Less memory used, entire bloom filter result can be sent over wire. Probabilistic algorithm Bloom Filter Use-Cases\nMalicious url detection in browser via bloom filter. CDN cache url, cache page only if 2nd request (member of set). Weak password detection. Username already taken. Cache only on 2nd request https://youtu.be/Bay3X9PAX5k\nhttps://youtu.be/V3pzxngeLqw\n34. Count-Min Sketch Count-Min Sketch is a probabilistic algorithm. Count frequency of event in streaming data, uses multiple hash to map frequency on to a matrix. Uses less space. In some cases it can over count due to hash collision but never under-count the events.\nCount frequency of events, range query, total, percentile. Uses less memory. Probabilistic algorithm. Every event is passed via multiple hash functions and respective matrix row/column updated. 
The frequency is determined by the minimum of all these counts. For more accuracy you can add more hash functions and wider column. In the example below hash generates numbers 0-6. Lesser hash functions will result in more collisions.\nhttps://youtu.be/ibxXO-b14j4\n35. BitMap Bit Array, Uses less memory Each bit holds value Using AND / OR operation can merge many bitmaps Eg: Daily site visitor count.\nhttps://youtu.be/8ZgRW0DNus4\n36. Locks \u0026amp; Contention Avoid locks if you want to scale, as they cause contention around shared resources Do pre-work instead of on-demand if it avoids contention. Eg: Issue 1 Billion tickets, instead of updating a row in DB with locks/syncronization, load a queue with all 1 Billion tickets and users can pick from queue. 37. Paxos \u0026amp; Raft To achieve Consensus over distribute system we use either paxos or raft algorithms.\nPaxos - Allows system to reach consensus based on majority votes. There are 2 ways to configure paxos\nNon-Leader - Client can send proposal, based on votes the consensus is reached. Since there is a lot of concurrency it can lead to conflicts/live-locks which is inefficient. Leader (Multi-Paxos) - Only one leader can send proposal, hence no live-locks present. Uses a ledger book to store requests in-case leader goes down. Highest server id will be the leader. Raft - Allows system to reach consensus based on what the leader says. After certain timeout the election for leader is held again. Each node stores a log (state information) that is replicated from the leader. Each node holds number of terms it has served as leader. If 2 systems get same votes during election, they will again carry out an election.\neg: Consul, etcd, Zookeeper\nhttps://youtu.be/fcFqFfsAlSQ\n38. CAP Theorem C - Consistency A - Availability P - Partition Tolerance CA System - Single Node MySql, Oracle CP System - Zookeeper AP System - Apache Couchbase, Cassandra https://youtu.be/KmGy3sU6Xw8\n39. 
ACID vs BASE transactions ACID\nAtomicity - All changes to data are performed as if they are a single operation, either all succeed or all fail. Consistency - Data is in a consistent state when a transaction starts and when it ends. Isolation - The intermediate state of a transaction is not visible to other transactions. Durability - Data persisted survives even if system restarted. BASE\nBasically Available - System guarantees availability. Soft State - The state of the system may change over time, even without input. Replication can take time so till then state is in soft-state. Eventual Consistency - The system will become consistent over a period of time 40. Database Scaling Read scaling - Replication, All writes goto one db node, which gets replicated to all read node db. (eventual consistency) Write scaling - Sharding 41. Partition vs Sharding Partitioning - Breaks up data into many smaller blocks within the same database server. Client need not be aware of partitions. Horizontal partition - Based on key the data is split. eg: All records for 2021 get written to partition_2021, all 2022 records get written to partition_2022 Vertical partition - Based on some column the data is split. eg: All the image blob of a profile are stored in a different table. Sharding - Breaks up data into many smaller blocks in different database servers. Client must be aware of shards. Cant do transactions or joins across shards. If data distribution is not uniform then will have to re-balance shards. eg: All customer records A-H go to database server1, all records I-Z go to database server2. When to Partition?\nWhen the table is too big for even indexes to search. Partition bring in improvement in query performance. When you need to purge old records as part of data management. Easier to drop partition than delete rows. Bulk loads and data deletion can be done much faster, as these operations can be performed on individual partitions. When to Shard?\nTo scale out horizontally. 
When there are too many writes. When data is transaction isolated, and you don't need to join across shards. If data is uniformly distributed among shards then query load is also equally distributed. Sharding on postgres using postgres_fdw extension.\n1CREATE TABLE customer 2( 3 id BIGSERIAL NOT NULL, 4 name VARCHAR(255) NOT NULL, 5 city_id INT NOT NULL, 6 created_on TIMESTAMP NOT NULL 7); 8 9CREATE 10EXTENSION postgres_fdw; 11GRANT USAGE ON FOREIGN 12DATA WRAPPER postgres_fdw to app_user; 13CREATE 14SERVER shard02 FOREIGN DATA WRAPPER postgres_fdw 15 OPTIONS (dbname \u0026#39;postgres\u0026#39;, host \u0026#39;shard02\u0026#39;, port \u0026#39;5432\u0026#39;); 16CREATE 17USER MAPPING for app_user SERVER shard02 OPTIONS (user \u0026#39;app_username\u0026#39;, password \u0026#39;app_password\u0026#39;); 18 19CREATE 20FOREIGN TABLE customer_2021 PARTITION OF customer 21 FOR VALUES FROM (\u0026#39;2021-01-01\u0026#39;) TO (\u0026#39;2022-01-01\u0026#39;) 22 SERVER shard02; 42. Partition Criteria Hash Based List Based Range Based Composite - multiple partitions under a partition Hash Partition\n1CREATE TABLE customer 2( 3 id BIGSERIAL NOT NULL, 4 name VARCHAR(255) NOT NULL, 5 city_id INT NOT NULL, 6 created_on TIMESTAMP NOT NULL 7) PARTITION BY HASH (id); 8CREATE TABLE customer_even PARTITION OF customer FOR VALUES WITH (MODULUS 2,REMAINDER 0); 9CREATE TABLE customer_odd PARTITION OF customer FOR VALUES WITH (MODULUS 2,REMAINDER 1); Range Partition\n1CREATE TABLE customer 2( 3 id BIGSERIAL NOT NULL, 4 name VARCHAR(255) NOT NULL, 5 city_id INT NOT NULL, 6 created_on TIMESTAMP NOT NULL 7) PARTITION BY RANGE (created_on); 8CREATE TABLE customer_2021 PARTITION OF customer FOR VALUES FROM 9( 10 \u0026#39;2021-01-01\u0026#39; 11) TO 12( 13 \u0026#39;2022-01-01\u0026#39; 14); 15CREATE TABLE customer_2022 PARTITION OF customer FOR VALUES FROM 16( 17 \u0026#39;2022-01-01\u0026#39; 18) TO 19( 20 \u0026#39;2023-01-01\u0026#39; 21); List Partition\n1CREATE TABLE customer 
2( 3 id BIGSERIAL NOT NULL, 4 name VARCHAR(255) NOT NULL, 5 city_id INT NOT NULL, 6 created_on TIMESTAMP NOT NULL, 7) PARTITION BY LIST (EXTRACT(YEAR FROM created_on)); 8CREATE TABLE customer_2021 PARTITION OF customer FOR VALUES IN 9( 10 \u0026#39;2021\u0026#39; 11); 12CREATE TABLE customer_2022 PARTITION OF customer FOR VALUES IN 13( 14 \u0026#39;2022\u0026#39; 15); 43. Bulkhead pattern If one microservice is slow it can end up blocking threads and there by affecting all other microservices. Solution is to have dedicated thread pool for each client. It isolates dependencies, so that problem in one dependency doesn't affect others. A counter can also be used with max limits instead of creating different thread pool. Fail-Fast is preferred over slow service.\nIf the cart service is not-responding the threads will be blocked and waiting, since the thread pool is different the problem is isolated.\nhttps://youtu.be/R2FT5edyKOg\n44. Circuit Breaker If a service is down, we want to avoid continuously making calls to the service, till it gets time to recover. If the number of request failures are above a threshold then we decide to return a default response. After a certain period we will allow few requests to hit the service and if the response is good, we will allow all the traffic.\nStates\nOpen - No traffic is sent. Closed - All traffic is sent. Half-Open - After timeout only few calls are allowed. https://youtu.be/ADHcBxEXvFA\n45. Consistent Hashing Nodes keep dying in a distributed system. To scale new nodes can be added as well. Consistent hashing lets you distribute traffic among the nodes uniformly.\nWhy not use round-robin to distribute traffic? Services often cache some data or store local data, so it makes for a better design if the same client request is sent to the server which has all the data already cached/locally stored. 
If you send the same client request randomly to random servers each time then cache/local data is not utilized.\nConsistent hashing also prevents DOS attacks to some extent. If a spam client send random requests and round robin distributes it across all nodes then the outage is large scale. However with consitent hashing only certain node will be impacted.\nIf you just hash the request and map it to a server then if the node count changes all the requests will be impacted and will move to different servers. Hence in consistent hashing we hash both the request and the servers to a hash space and link them in a hash ring. With consistent hashing adding a new servers affects only few requests.\nThe distribution of servers in a hash ring may not be uniform hence you can use virtual servers. With more virtual servers the distribution is more balanced. Eg: if there are 60K user requests and there are 6 servers each server can distribute and handle 10K. Do note that if one node goes down then all the requests flood the next node causing it to go down thus causing a full outage. Virtual servers will avoid this to some extent.\nhttps://youtu.be/UF9Iqmg94tk\n46. Rate limit Token Bucket - Burst - Fixed token are added to bucket, bucket is always kept in full state. Can lead to burst of traffic. Token Bucket - Sustain - Constant token are added to bucket only if previous token are consumed. Smooth traffic. Leaky Bucket - Bucket size if fixed, if bucket full request are rejected, a processor de-queue bucket at fixed rate. Fixed Window - For the time period maintain a key,value pair (key=time, value=counter). If counter is greater than rate limit reject. Leads to burst traffic around edges of time period. eg: If rate limit is 10 per min, then 8 request come in the last 30 sec of min window and 8 more requests come in the first 30 second of next min window, within the window the rate limit is honored but we still processed 16 requests within a 1 min window. 
Sliding Log - Go over all previous nodes up to the time interval, in the link list and check rate limit exceeded, if yes then reject. Since the 1 min window keeps changing traffic is smooth unlike fixed window. Sliding Window Counter - Go over all previous nodes up to the time interval, in the link list and check if rate limit exceeded, if yes then reject. Instead of storing each request timestamp like sliding log, previous node stores the count. Places where rate limit can be applied\nhttps://youtu.be/9CIjoWPwAhU\nhttps://youtu.be/FU4WlwfS3G0\n47. Push vs Pull strategy RabbitMQ is push based, Kafka is pull based Push is expensive \u0026amp; real-time Pull is cheap but not real-time 48. NIO Majority of threads spend time waiting, NIO (non-blocking IO) take the approach of not blocking the threads. Eg: Spring Reactor\nNon-Blocking IO helps systems scale with fewer resources. The complete source to destination flow has to be non-blocking. 49. Multi-Tenancy Multiple customers share same resource/server but customers are not aware of each other and instances are isolated. eg: Kubernetes namespaces\n50. Authorization vs Authentication Authentication - Is the user who they claim to be? Verifies the identity of the user. Authorization - Does the user have the right role/permission to execute that operation? 51. Service Mesh \u0026amp; API Gateway API gateway is a component that sits between clients and services and provides centralized handling of API communication between them. API Gateway authenticates all traffic before routing it to the called service\nService-to-service communication is essential in a distributed application but routing this communication, both within and across application clusters, becomes increasingly complex as the number of services grows. Service mesh enables managed, observable, and secure communication between individual services. It works with a service discovery protocol to detect services. 
Istio and envoy are some of the most commonly used service mesh frameworks.\nuser-to-service connectivity is called north-south connectivity, API gateway controls this communication. service-to-service connectivity is called east-west connectivity, service mesh controls this communication. Functions of API gateway\nService Discovery Load Balancing Circuit Breaker Distributed Tracing \u0026amp; Logging Telemetry Security - Authentication \u0026amp; Authorization Routing - Routing, circuit breaker, blue-green and canary deployments, load balancing, health checks, and custom error handling Observability Rate limiting Caching Request and Response Transformation API gateways can be augmented with web application firewall (WAF) and denial of service (DoS) protection. Depending on the system architecture and app delivery requirements, an API gateway can be deployed in front of the Kubernetes cluster as a load balancer (multi-cluster level), at its edge as an Ingress controller (cluster-level), or within it as a service mesh (service-level).\n52. Deployment Strategy Guidelines for deployment\nEnsure that database schema works with both new version and old version of the service. Provide health check url to determine if node is healthy. Ensure rollback works. Types of deployment\nRolling - Services are upgraded one after the other. Blue Green - Few services are upgraded and test teams validate and signoff before all services are upgraded. Canary - Few services are upgraded and part of the traffic hits these new instances. 53. GeoHashing \u0026amp; Quadtree GeoHashing\nGeohashing is a geocoding method used to encode geographic coordinates such as latitude and longitude into short alphanumeric strings. Coordinates lat 40.730610, long -73.935242. can be represented in geohash as af3bdmcef. 
By comparing strings we can tell if the 2 locations are closer to each other, depending on how many chars in string match.\neg: Geohashes af3bdmcef and af3bdmcfg are spatially closer as they share the prefix af3bdm.\nEasier to store in DB. Easier to share in URL. Easier to find nearest neighbour based on string match. QuadTree\nA quadtree is an in-memory tree data structure that is commonly used to partition a two-dimensional space by recursively subdividing it into four quadrants (grids) until the contents of the grids meet certain criteria. Internal node has exactly four children, only the leaf nodes store the actual value. Quadtrees enable us to search points within a two-dimensional range.\nEg: Identify all restaurants/cabs in the 1.5 miles/km range from given point. If there are no restaurants/cabs in the grid/node then add neighbouring grids/nodes.\n54. Event sourcing Event Notification - Only informs something changed. Upto client to look at data and pick the new changes Event Carried State Transfer - Event itself carries the data on what changed. Event Sourcing - All the changes of change are stored, if we replay the events we will get the final object. Instead of storing the update to an object/record, change the db to append only. Every change to the object/record is stored as a new entry in append fashion.\nEg: A customer record, each time address of customer changes instead of updating existing column, just insert new row with the new address. A materialized view can be generated from this data to get the latest customer record. Combining all the records gives latest customer record.\nUpdates can come from multiple sources, there is no contention to update. Consistency for transactional data based on the time the event was processed. Maintain full audit trails and history. Slower to generate the materialized view. 55. Attack surfaces To avoid security breaches, the objective of all systems must be to reduce the number of attack surfaces. 
More the components in your system, more the attack surfaces that need to be hardened.\nSecurity Hardening\nNetwork packet spoofing / eavesdropping - Someone on the same network can look at http packets using tools like wireshark, http packets are un-encrypted. Use https to prevent this attack Man-in-the-middle attack - Someone pretending to be the actual client, Use SSL authentication with symmetric encryption. Denial-Of-Service - Someone can overload your server and keep it busy so valid requests won't be processed. use rate limiting, IP blacklisting. Bot attack - Millions of bots can be made to looks like real traffic is hitting your service. Use re-captcha to identify real users. Storing CVV, passwords in DB - Avoid storing plain text passwords in DB. Always use salt (piece of random data added to a password before it is hashed and stored) Reading Passwords - Avoid assigning passwords to Strings, instead assign them to char array. String use string pool in java so the password are not garbage collected immediately and may show up in heap dumps. Firewall \u0026amp; ports - Enable firewall and open only the ports that are required. eg: close ftp port is not needed. Token expiry - Always set short expiry (TTL) for tokens, if compromised then the token will expire soon. Roles - Always provide only needed roles to users, so that even if password is compromised permissions restrict them from doing more damage. DMZ - Demilitarized zone, restrict backend servers from having direct access to internet. If backend servers need internet configure a forward proxy. SSH Tunneling - SSH to a primary server and then open a tunnel to the actual server. Auditing - Always ensure proper auditing and logging is available to trace any breaches. Backup \u0026amp; Checkpoint - Always ensure proper backups are available in case data needs reconciliation. Checkpoint run at short interval capturing the snapshot of the current system. 56. 
Kubernetes Kubernetes is a platform for managing containerized workloads.\nService discovery Load balancing Storage orchestration Automated rollout \u0026amp; rollback Resource usage enforcement Self-healing Secret \u0026amp; Configuration management https://gitorko.github.io/post/kubernetes-basics/\n57. Indexing - Btree, B+tree, BitMap Indexes help find the required data in large data set. Full table scan are costly hence reducing the search space is always preferred.\nBitMap index - A binary array to represent value, Uses less memory. Btree - Creates a balanced tree on insert. B+tree - Similar to btree but values are present only in the node. Improves range queries. Btree (Max Degree 3)\nB+tree (Max Degree 3)\nhttps://youtu.be/UzHl2VzyZS4 https://youtu.be/5-JYVeM3IQg\nhttps://www.cs.usfca.edu/~galles/visualization/BTree.html https://www.cs.usfca.edu/~galles/visualization/BPlusTree.html\n58. Data Race \u0026amp; Race conditions Data Race - Multiple threads access shared variable at same time without synchronization \u0026amp; at least one thread is writing, can cause corruption. Eg: Addition to long/double which are 64 bits.\nRace conditions - Multiple threads access shared variable, value of variable depends on execution order of threads. Atomic looking operations are not done atomically.\nRace conditions can be of 2 types\nCheck \u0026amp; Update - When two threads check if value present in map and put if absent.To prevent use locks or putIfAbsent atomic operations. Read \u0026amp; Update - When two threads read a value and increment it. Use locks or atomic variables. https://youtu.be/KGnXr62bgHM\n59. Merkel Tree Merkle tree also known as hash tree is a data structure used for data verification and synchronization. It's a tree data structure where each non-leaf node is a hash of its child nodes.\nIf the file is 100 GB then its chunked into 4 parts, A hash is calculated for each chunk and the merkle tree created. 
If any chunk of the file is corrupted then it's easy to detect it and fix it by comparing new merkle tree to the original merkle tree as the hash on corrupted side doesn't match.\nThis structure of the tree allows efficient mapping of huge data and small changes made to the data can be easily identified. If we want to know where data change has occurred then we will not have to traverse the whole structure but only a small part of the structure. The root hash is used as the fingerprint for the entire data. If root hash doesn't match then some data below has changed. 60. Pub-Sub vs Point-To-Point Message brokers allows systems to communicate with each other asynchronously. This ensures loose coupling between systems. Different messaging protocols AMQP, STOMP, MQTT can be used.\nPoint-to-Point messaging: Message sent to queue is sent to only one consumer. Publish-subscribe messaging: Message sent to the topic is sent to all subscribers. Guarantee that every message will only be delivered once.\n61. Availability Metrics Availability is the percentage of time that a system is operational (uptime). Measured in number of 9s. A service with 99.99% availability is described as having four 9s.\nAvailability (Percent) Downtime (Year) Downtime (Month) Downtime (Day) 99.9% (three nine) 8h 45m 43m 49s 1m 26s 99.99% (four nine) 52m 35s 4m 22s 8s https://uptime.is/\nSequence vs Parallel Availability\n62. Testing Functional testing\nUnit tests - Developers write tests that test only the specific function, interaction with DB or other services are mocked. Integration tests - Writing tests that interact with other components like DB or external services, validates system interactions. Functional tests - Similar to integration testing, but validates functionality, real use cases. Regression tests - Run by QE team, automation scripts that executes tests and validate against recurrence of known issues. 
User Acceptance tests (UAT) - Testing done by user/customer before accepting the system. Smoke test / Sanity test - Testing done in production after deployment. Non-Functional Testing\nPerformance \u0026amp; Scale test - Testing done by perf team to identify performance and scale issues. Security test - Testing done to ensure no security vulnerabilities exist. Usability test - Tests if the colors and button placement are good. Tracks user behaviour when using the system. Soak test - Runs suite of tests that run for longer period of time. eg: 2 days, 1 week etc. 63. REST POST is always for creating a resource (does not matter if it was duplicated) PUT is for checking if resource exists then update, else create new resource. PATCH is always for updating a resource. PUT is idempotent method means that the result of a successful performed request is independent of the number of times it is executed.\nMethod Description Idempotent GET Get a resource object Yes PUT Create a resource object or replace it Yes DELETE Delete a resource object Yes POST Create a new resource object No HEAD Return meta data of resource object Yes PATCH Apply partial update on resource object False OPTIONS Determine what HTTP methods are supported by a particular resource Yes 64. Types of database Relational Database - Each row is a record and column is a field in the record. eg: PostgresSQL, MySQL Columnar Database - Stores data by columns, handle write-heavy workloads. Eg: Apache Cassandra, HBase Document Database - Data is semi-structured, encoded in json, xml, bson eg: MongoDB, Couchbase Graph Database - Entities are represented as nodes and relations as edges, easier to perform complex relationship-based queries. eg: Neo4j, Amazon Neptune Key-Value Database - Data is stored in key value pairs, can be easily partitioned and scaled horizontally. eg: Redis, Amazon DynamoDB Time-Series Database - Optimized for timestamp data, comes with time based functions. eg: TimescaleDB 65. 
Domain Name System (DNS) Domain Name System (DNS) translates human-friendly domain names into their corresponding IP addresses\nLookup starts with the root server that points to the right TLD. Top-Level Domain (TLD) server points to the authoritative name server. Eg: TLD .com belongs to verisign The authoritative name server points to the zone or zone file that holds the DNS record. 66. Distributed File Systems Distributed file systems are storage solutions designed to manage and provide access to files and directories across multiple servers, nodes, or machines, often distributed over a network. eg: HDFS\n67. Full-text Search (Inverted Index) Full-text search enables users to search for specific words or phrases. Full-text search relies on an inverted index, which is a data structure that maps words or phrases to the documents in which they appear. An inverted index is an index data structure storing a mapping from content, such as words/numbers, to its locations in a document or a set of documents eg: Elasticsearch\nTwo types of inverted indexes\nRecord-Level: Contains a list of references to documents for each word. Word-Level: Contains the positions of each word within a document. Inverted index - A data structure used primarily for full-text search. It maps content, such as words or terms, to their locations in a database. Ideal for exact term-based full-text search, mapping terms to documents.\nTrigram Index - A type of index that helps in performing fast, efficient searches for substring matching and fuzzy matching by breaking down strings into a series of three-character sequences (trigrams). Suitable for approximate and fuzzy matching, indexing trigrams (three-character sequences) for efficient substring search.\n68. Backend for FrontEnd pattern (BFF) BFF is a variant of the API Gateway pattern, Instead of a single point of entry, it introduces multiple gateways. 
You can have a tailored API that targets the needs of each client (mobile, web, desktop, voice assistant, etc.) Decoupling of Backend and Frontend gives us faster time to market as frontend teams can have dedicated backend teams serving their unique needs. The release of new features of one frontend does not affect the other.\n69. View vs Materialized View CREATE VIEW - Virtual table based on the result set of a query. The data is not stored physically in the database; rather, the query is executed each time the view is accessed CREATE MATERIALIZED VIEW - Stores the result set of the query physically in the database. It is like a snapshot of the data at a particular point in time, Needs manual refresh to reflect changes in the underlying data A materialized view is a cached result of a complicated query. You can even add primary keys and indexes to this view.\n1CREATE VIEW active_customers AS 2SELECT id, name 3FROM customer 4WHERE status = \u0026#39;active\u0026#39;; 1CREATE MATERIALIZED VIEW active_customers AS 2SELECT id, name 3FROM customer 4WHERE status = \u0026#39;active\u0026#39; 5WITH DATA; 6 7REFRESH MATERIALIZED VIEW active_customers; 70. On-Prem vs IAAS vs PAAS vs SAAS Cloud models\nPrivate Cloud - Cloud computing model where the infrastructure is dedicated exclusively to a single organization. High control and security, suitable for regulated industries. Public Cloud - Cloud computing model where resources are shared among multiple organizations (tenants). Cost-effective and scalable, ideal for startups and fluctuating workloads. Hybrid Cloud - Cloud computing model that combines private and public clouds, allowing data and applications to be shared between them. Combines private and public clouds for flexibility and optimized resources. Multi Cloud - Involves using multiple public cloud services from different providers. This approach avoids vendor lock-in, increases redundancy, and leverages the best services from each provider. 
Uses multiple providers for vendor independence and optimized services. Infrastructure Models\nOn-Prem - All hardware and software are installed, managed, and maintained within the physical premises of an organization. Scalability limited by physical resources. Infrastructure as a Service (IaaS) - Provides virtualized computing resources over the internet. It offers fundamental IT resources such as virtual machines, storage, and networks. Eg: Amazon Web Services (AWS) EC2, Azure Virtual Machines, Google Compute Engine (GCE) Platform as a Service (PaaS) - Allows customers to develop, run, and manage applications without dealing with the infrastructure. It includes operating systems, middleware, and development tools. Eg: Cloud Foundry, Azure App Service, Google App Engine (GAE), Heroku, AWS Elastic Beanstalk, Red Hat OpenShift Software as a Service (SaaS) - Delivers software applications over the internet on a subscription basis. The provider manages everything from infrastructure to applications. Eg: Google Workspace, Microsoft 365 71. Map new HashMap() - is not a thread-safe data structure due to its non-synchronized nature. Collections.synchronizedMap() - provides a synchronized (thread-safe) map. It synchronizes all the methods to ensure that only one thread can access the map at a time. Synchronization can be a bottleneck if many threads access the map concurrently. ConcurrentHashMap - is designed for concurrent access and allows multiple threads to read and write without locking the entire map. It employs a finer-grained locking mechanism, which divides the map into segments to allow greater concurrency. Lock Striping\nLock striping is a technique used to improve the concurrency and performance of data structures by dividing the data into multiple segments, each protected by its own lock. 
This approach allows multiple threads to access different segments of the data structure simultaneously, reducing contention and increasing throughput.\nEg: ConcurrentHashMap, the data is divided into multiple segments, each with its own lock. When a thread needs to read or write to the map, it only needs to acquire the lock for the relevant segment, not the entire map. This allows other threads to access different segments concurrently. It divides the map into 16 segments by default (this can be configured), each with its own lock.\n73. Normalization vs De-Normalization Normalization - focuses on reducing redundancy and ensuring data integrity by organizing data into related tables. De-Normalization introduces redundancy to improve read performance and simplify queries by combining related tables. De-Normalized tables are preferred for high scalability as joins are costly operations. Foreign keys impact performance.\nDe-Normalized table\norder_id customer_id customer_name product_id product_name order_date 1 101 Alice 201 Phone 2024-07-01 2 102 Bob 202 Laptop 2024-07-12 Normalized table\ncustomer_id customer_name 101 Alice 102 Bob product_id product_name 201 Phone 202 Laptop order_id customer_id product_id order_date 1 101 201 2024-07-01 2 102 202 2024-07-12 Normalization Forms\n1NF (First Normal Form): No multi-valued attributes. Ensure atomic values and uniqueness.\nBefore:\nstudent_id name course 101 Alice Math, Science 102 Bob History, Math After:\nstudent_id name course 101 Alice Math 101 Alice Science 102 Bob History 102 Bob Math 2NF (Second Normal Form): Achieves 1NF and Remove partial dependencies.\nBefore:\nstudent_id and course_id is a composite primary key, course_instructor doesn't depend on student_id so partial dependency exists. Relation: (AB) (student_id+course_id) combined should determine C (course_instructor), A alone or B alone cant determine C.\nstudent_id course_id course_instructor 101 101 Dr. Smith 101 102 Dr. Jones 102 103 Dr. 
Brown After:\ncourse_id course course_instructor 101 Math Dr. Smith 102 Science Dr. Jones 103 History Dr. Brown student_id course_id 101 101 101 102 102 103 102 104 3NF (Third Normal Form): Achieve 2NF and Remove transitive dependencies.\nBefore:\ncourse_id and course is a composite primary key but phone number is associated with instructor which is not primary key. Relation: Transitive Dependency, A(course_id) determines -\u0026gt; B(course_instructor) which determines -\u0026gt; C(phone)\ncourse_id course course_instructor phone 101 Math Dr. Smith 999-978-9568 101 Science Dr. Jones 999-978-9468 103 History Dr. Brown 999-978-9368 After:\ncourse_id course course_instructor 101 Math Dr. Smith 101 Science Dr. Jones 103 History Dr. Brown course_instructor phone Dr. Smith 999-978-9568 Dr. Jones 999-978-9468 Dr. Brown 999-978-9368 BCNF (Boyce-Codd Normal Form): A stricter version of 3NF, ensuring that every determinant is a candidate key. Relation: Where A(instructor_id) determines B(course_instructor), then A is a super key\nAfter:\ncourse_id course instructor_id 101 Math 401 101 Science 402 103 History 403 instructor_id course_instructor phone 401 Dr. Smith 999-978-9568 402 Dr. Jones 999-978-9468 403 Dr. Brown 999-978-9368 404 Dr. Smith 777-978-9568 4NF: Remove multi-valued dependencies. No table should have more than one multi-valued dependency.\nBefore:\ncourse_instructor phone email Dr. Smith 999-978-9568 Dr. Jones 999-978-9468 Dr. Brown 999-978-9368 Dr. Smith sm@email.com Dr. Jones jn@email.com Dr. Brown br@email.com After:\ncourse_instructor email Dr. Smith sm@email.com Dr. Jones jn@email.com Dr. Brown br@email.com course_instructor phone Dr. Smith 999-978-9568 Dr. Jones 999-978-9468 Dr. Brown 999-978-9368 5NF: Decompose data into the smallest pieces without losing integrity. Lossless decomposition.\nBefore:\ncourse_instructor email Dr. Smith sm@email.com Dr. Jones jn@email.com Dr. Brown br@email.com course_instructor phone Dr. Smith 999-978-9568 Dr. 
Jones 999-978-9468 Dr. Brown 999-978-9368 After:\ninstructor_id course_instructor 501 Dr. Smith 502 Dr. Jones 503 Dr. Brown instructor_id email 501 sm@email.com 502 jn@email.com 503 br@email.com instructor_id phone 501 999-978-9568 502 999-978-9468 503 999-978-9368 Other Topics Normalization vs De-Normalization Federation First Level vs Second Level Cache Distributed tracing Observability - wavefront, prometheus, nagios Hadoop - Map Reduce CAS - compare and swap Client side load balancing GitOps \u0026amp; CI/CD Telemetry Block chain - distributed ledger Disaster recovery Auto scaling Batch vs Stream data processing vs Micro Batch Star vs Snow flake schema Time Series Database Hyperlog Elasticsearch OAuth 2.0 RPC, gRPC Rest vs SOAP vs GraphQL Scatter Gather Pattern CORS (Cross-origin resource sharing) P2P Network Tor network \u0026amp; VPN SOLID Design principles SSL vs TLS vs mTLS Storage types Hierarchy timing wheel RSync LSM tree Salt / Ansible JIT (Just in Time) compiler Operational transformation - Shared document edit Strangler pattern API versioning Backend for frontend (BFF) pattern Transaction propagation \u0026amp; rollback policy Adaptive Bitrate Streaming for video Scenarios Each of the usecases below highlights a good system design practice:\nAvoid making backend calls if possible. Avoid using contention for shared resources. Avoid updating row, consider inserts over updates. If possible create objects of representative resources instead of using counter Split the big task to smaller task, consider failure and retry Use a queue in cases where producer can produce more than consumer can consume Minimize the request-response time window. 1. Design a shopping cart application. Users should be able to browse the various products and add them to cart and buy them.\nIf the products are rendered on a web page for each request, then the system won't scale. Browsing products is more frequent than buying something. 
Generate a static website and upload to CDN, only the buy rest api calls hit the backend server. Home pages or landing pages which are frequently hit perform better if they are static sites and on the CDN. Even for user tailored home pages like Netflix, Hotstar etc, generate static sites per user and avoid actual backend calls as much as possible. Each service in a micro-service architecture needs to have its own database. The external payment gateway can fail to respond hence there must be job to periodically check if the payment failed and no response came. Once the order is placed the customer is redirected to the external payment gateway url with a callback url the gateway will call on success of payment. Tip If you can design a system where the calls never have to hit your backend service it improves the design. Eg: CDN, Edge Server, Cache etc. Look at client side caching as well if it means avoiding that backend call.\n2. Design a URL shortener service (Tiny URL) Users will provide a long url, your service needs to return a short url. If the users lookup the short url service needs to return the actual url.\nIf you generate a short url with UUID there can be collision if the key is same. More collisions more time is spent in returning a response degrading your service. The system will not scale. If the pre-created hash/short url code are stored in a RDBMS database there is contention at the db when all the threads ask for the next free key. Ensure that pre-created short url code are not sequential so that someone should not guess what the next key can be simply by incrementing one character. The ratio of read to write (READ:WRITE) are not same. For every 1 write the reads can be 1000. 1:1000. Someone creates a tiny url and shares it with is 1000 followers. There will be more reads compared to writes. Since we have more reads than writes we will use CQRS (Command and Query Responsibility Segregation) pattern here. 
The Generate key service will populate the queue with the short-url codes. Generate key service will ensure that duplicate keys are not loaded by generating short urls in range eg: A-F, G-N ranges. A consistent hashing service will handle the situation where we add more queues to the group or cases when queues die. The put operation first fetches a key from the queue, since there are multiple queues there is no contention to get a new key. It then writes the key \u0026amp; value to the database. If we need to scaling even more, we can use region specific sharding. The service will then need to be aware of the shards to write to and read from. eg: Asia vs North America shard. Nodes go down often, so if the queues die then there can be unused keys that are forever lost. We use a recovery task that runs nightly to recover any lost keys. Few other design approach suggest zookeeper to maintain range of keys, In the above design the service doesn't need to be aware of ranges hence we dont need Zookeeper or consensus manager. If the short url has to be generated on fly then you can use DB to know the ranges each node is handling, overhead of zookeeper doesn't justify the benefits. A tiny url fetches will have more probability of being queried again in short span hence cache will avoid the database call. Note Be aware of collisions in hashing (when hash is same), on a new environment there will be fewer collisions but as your data grows collisions will increase.\nTip Avoid contention for resources, contentions grow exponentially as system scales. The simple act of asking the DB for the next free record among a set, incrementing a particular row value are examples where contention can occur.\n3. Design a Youtube / Facebook like counter service Users can like a post/video and submit their likes, the service needs to count how many likes a post/video has.\nA single counter that needs to be updated by many threads always creates contention. 
Addition operation needs to be atomic making it difficult to scale. If you treat the counter as a row in the DB and use optimistic locking with retry logic to increment with exponential backoff you avoid locking the resource but there are multiple attempts to update the counter which causes scale issues. So relational database is out of picture. You can read more about 'Dynamic Striping' \u0026amp; Long Adder \u0026amp; Long Accumulator to get an idea how java does addition operation on scale. However this is restricted to a single instance. If you consider each like counter as a new row you avoid contention of an update but more time is spent in summing up the total by counting all rows. If the counter can be approx values, then you can use Count-Min Sketch approach. Redis provides atomic operations of increment. We dont want to keep a single video like counter on one node as it can overload it if there are more likes for that video compared to others. By using Round Robin we can scale our service by adding more redis nodes. We use a Queue event model to let the count aggregator service to sum the counts across all redis nodes and save that to a DB. The get count will always read the DB for latest count. There will be a slight delay from the time we submit the like till we see the count which is eventual consistency. The event queue payload can carry information about nodes that got updated, this way the aggregator service need not iterate over all redis nodes. Tip Avoid updating DB rows in most cases, updates don't scale. Always prefer using inserts/append over updates.\nTip To prevent race conditions optimistic or pessimistic locking need to be used and they dont scale. Use redis for atomic increment \u0026amp; decrement as they guarantee atomicity.\n4. Design an Advertisement Impression Service tied to a budget For a give budget, ads of a particular type are served. Once the budget is exhausted the ads should not be served. 
For the type shoes, Nike has a budget of 1000$ and Adidas has a budget of 500$. When a website wants to display an ad it calls your service which randomly returns an ad, ensuring that the budget is not exceeded. If each ad impression costs 1$ then you can do 1000 Ad impressions of Nike and 500 impression of Adidas.\nIt looks similar to the like counter service, where we can (atomic) decrement the budget based on the number of Ads being served. An incoming request randomly picks an Ad and decrements the budget for that Ad till it reaches 0. However such a design will still run into contention when scaled because of the decrement operation. The contention occurs when we want to decrement the budget, since we cant distribute the budget value across multiple nodes, the decrement operation still needs to happen on one node and in atomic fashion. Assume there is only 1 Nike brand with a budget of 1M. Now when there is huge load since there is only 1 brand and the budget needs to be decremented as an atomic operation, even though redis can do atomic decrement operations, it will still slow down the system since all the threads are waiting to decrement the single budget entity. A token seeder pre-populates dedicated queues with a single token. Based on the budget, an equal number of tokens are populated. Nike Queue will have 1000 token, Adidas Queue will have 500 tokens. When the request comes in for a shoe type Ad. A random queue is picked and a token dequeued. Based on that token the associated ad is served. Once the Queue is out of tokens no Ads are served for that brand. The Ad fetcher service needs to be aware of which queue to deque, it needs to be aware of how many queues are associated with the given brand. If a queue goes down the token seeder can identify and recreate a new queue based on the transactional log that is held by each service to identify how many tokens were already served. 
If there is a new brand that wants to join, just create the tokens and seed a queue and add it to the group. The next round-robin should pick it up. Tip Instead of incrementing/decrementing a counter, check if it's possible to create tokens ahead of time. With a bunch of tokens in a queue/bucket it's easier to scale than trying to update a single counter in atomic fashion.\n4. Design a Code Build \u0026amp; Deploy System Build the code when someone commits code to a branch and deploy it to a machine.\nBuilds can take long time to complete hence split the task into 2, if deploy fails we don't want to build again. Writing the records to DB would take more time compared to pushing to queue and polling the DB would need retry mechanism without wasting cpu cycles, hence using RabbitMQ is a better fit. Builds can take time, so we dont want the manager service constantly polling workers. Once the worker completes it will push an event that will be consumed by manager service to continue the deployment flow. If workers die during the build then heartbeat will not be updated and a scheduler can restart the job. If the build nodes make a direct connection for heart beat this can overwhelm the manager service as there will be many worker nodes. We maintain dedicated queues for each region. If one region is under heavy load we can add more consumers/workers to address that region. A periodic job checks for worker node heartbeat, if the TTL has expired then will restart the job. After the build is done the queue is updated, the next stage of deploy is started. Tip Split the tasks into smaller sub-tasks so that they can be restarted in case of failure.\n5. Design a large scale file de-duplication service You will receive a number of files (customer records) in a folder once a day, the file sizes range from 10GB-50GB that need to be de-duplicated based on few columns eg: Name \u0026amp; phone number column.\nProcessing a large file takes time. 
So chunking the file into manageable sizes helps distribute the task, and restart if some tasks fail. Avoid in-memory processing like Sets/Maps which can easily run out of memory. You can use a database with unique constraints, but this is write intensive task hence won't scale. Since the files arrive once a day, this is more batch oriented and not streaming task. Use a Bloom Filter a probabilistic data structure. This is used to test whether an element is a member of a set. There can be False-positive matches but no false negatives. Pick a big bit array \u0026amp; many hash functions to avoid collision this will avoid false positives as much as possible. Bloom filter bit array resides in memory hence ensure that the file is processed by the same service. If the bit array needs to be shared, use redis in-memory BITFIELD If false positive can not be avoided despite the large hash range, we can rely upon db unique constraints check as the 2nd level check to verify only records that are identified as duplicate. Tip Smaller tasks take less time, can be restarted/retried, can be distributed. Always check if the input data can be chunked \u0026amp; tasks made to smaller units instead of one big task.\nTip When there are more producers than consumers it will quickly overwhelm the system, use a queue to store and process the tasks asynchronously.\n6. Design a flash sale system You have limited items that are up for sale. You can expect a large number of users trying to buy the product by adding it to the shopping cart. You cant oversell or undersell.\nThe main objective is to keep the request-response window small. If the request waits (synchronous) till the operation of adding to cart is complete it will bring down the system. We will use a rabbitmq to queue the incoming burst of requests, hot-potato handling. As soon as the request to add to cart is received we will add it to the queue. 
Each user after placing the request to add to cart will be in wait state and query the status of his request. If the add to cart operation has to be completed within same request-response then use the same design as used in use case 4 Design an Advertisement Impression Service tied to a budget where you pre-allocate token on the queue.\nReal Implementation\nhttps://gitorko.github.io/flash-sale-system/\nTip Always minimize the request-response time window. The longer the request is kept open it will negatively impact the system.\n7. Design a chat server The chat server needs to support 1-1 and group text based chat. The client can be offline and will receive all the message when they are back online.\nPublish-Subscribe pattern, asynchronous in nature. We need to store the data to be read later when consumer is offline, hence Kafka seems a good fit, however Kafka topic and partition management introduces latency hence we use redis queue instead. We will split the command channel and data channel. Command channel only send the next call-back url and action to invoke. The client will fetch the data via HTTP call. Data traffic is heavy and hence will not overload the command bidirectional channel. We will partition the users based on region. Which region maps to which active service is maintained by config database. Each user will have a dedicated queue to which messages will written. The same messages will be written to the DB as well in append only mode. This can be done either by service writing to both or from queue-queue transfer (i.e persist-queue transfer to delivery-queue). In case the queue failure/user migration the message in DB which are not acknowledged will be reloaded to the queue. If the communication is uni-directional we can use SSE, since we want to send heartbeats we will use websocket which is bidirectional. The metadata can be stored in Relation database, which the message itself can be stored in Document Database. 
Real Implementation\nhttps://gitorko.github.io/chat-server/\nTip Split the communication channel into a command channel and a data channel.\n8. Design a Voting service https://gitorko.github.io/voting-system/\n9. Design a Stock Exchange (Price Time Priority Algorithm) https://gitorko.github.io/stock-exchange/\n10. Design a ticket booking system https://gitorko.github.io/ticket-booking-system/\nBehavioural Questions Use the STAR approach to answer a question\nBehavioural questions try to understand if the candidate is fit for a certain role\nOrg fit - Will hiring the candidate create issues for the organization? Team fit - Will hiring the candidate create conflict within teams? Role fit - Will the candidate meet the role requirements? Questions\nWhy are you leaving your organization? What was the toughest problem you solved? How do you deal with an ill-tempered colleague? What do you do when your proposed design is shot-down by other peers? What do you do when a junior causes a production outage? Questions you can ask\nHow is the work culture? How often are the on-call \u0026amp; release cycles? What is the career growth path/levels at the org? Are the projects greenfield (new) or brownfield (existing)? How big are the teams and how many levels are there in the reporting structure? What is the cash:stock options in the salary component? Tips\nAvoid blaming others. Take ownership for success \u0026amp; failure. Learn to delegate responsibilities \u0026amp; trust people to complete it. Accept feedback both positive \u0026amp; negative. Use the organization tools \u0026amp; procedures to handle conflicts that are beyond your control. Seek Peer or Mentor review/feedback when in doubt. Team accomplishment takes priority over individual accomplishment. Treat others how you would like to be treated. 
Youtube Channels ByteByteGo\nHussein Nasser\nDefogTech\nTechDummiesNarendraL\nReferences https://github.com/checkcheckzz/system-design-interview\nhttps://github.com/mmcgrana/services-engineering\nhttps://github.com/resumejob/system-design-algorithms\nhttps://github.com/donnemartin/system-design-primer\nhttps://github.com/relogX/system-design-questions\nhttps://github.com/madd86/awesome-system-design\nhttps://github.com/karanpratapsingh/system-design\nhttps://tianpan.co/notes/2016-02-13-crack-the-system-design-interview\n","link":"https://gitorko.github.io/post/grokking-the-system-design-interview/","section":"post","tags":["interview","system-design"],"title":"Grokking the System Design Interview"},{"body":"","link":"https://gitorko.github.io/tags/system-design/","section":"tags","tags":null,"title":"System-Design"},{"body":"","link":"https://gitorko.github.io/categories/system-design/","section":"categories","tags":null,"title":"System-Design"},{"body":"","link":"https://gitorko.github.io/tags/kotlin/","section":"tags","tags":null,"title":"Kotlin"},{"body":"","link":"https://gitorko.github.io/categories/kotlin/","section":"categories","tags":null,"title":"Kotlin"},{"body":"A spring boot project with Kotlin\nGithub: https://github.com/gitorko/project03\nKotlin Kotlin is a cross-platform, statically typed, general-purpose high-level programming language with type inference. 
Kotlin is designed to interoperate fully with Java, and the JVM version of Kotlin's standard library depends on the Java Class Library, but type inference allows its syntax to be more concise.\nCode 1package com.demo.project03 2 3import com.demo.project03.domain.Customer 4import com.demo.project03.repo.CustomerRepository 5import org.slf4j.Logger 6import org.slf4j.LoggerFactory 7import org.springframework.boot.CommandLineRunner 8import org.springframework.boot.autoconfigure.SpringBootApplication 9import org.springframework.boot.runApplication 10import org.springframework.context.annotation.Bean 11 12@SpringBootApplication 13class Main { 14 private val log: Logger = LoggerFactory.getLogger(this::class.java) 15 16 @Bean 17 fun onStart(repo: CustomerRepository) = CommandLineRunner { 18 log.info(\u0026#34;Seeding!\u0026#34;) 19 val customer = Customer(0, \u0026#34;Jack\u0026#34;, 35) 20 repo.save(customer) 21 } 22} 23 24fun main(args: Array\u0026lt;String\u0026gt;) { 25 runApplication\u0026lt;Main\u0026gt;(*args) 26} 1package com.demo.project03.controller 2 3import com.demo.project03.domain.Customer 4import com.demo.project03.repo.CustomerRepository 5import org.slf4j.Logger 6import org.slf4j.LoggerFactory 7import org.springframework.beans.factory.annotation.Autowired 8import org.springframework.web.bind.annotation.GetMapping 9import org.springframework.web.bind.annotation.PostMapping 10import org.springframework.web.bind.annotation.RequestBody 11import org.springframework.web.bind.annotation.RestController 12 13@RestController 14class HomeController { 15 16 private val log: Logger = LoggerFactory.getLogger(this::class.java) 17 18 @Autowired 19 lateinit var repo: CustomerRepository 20 21 @GetMapping(\u0026#34;/customer\u0026#34;) 22 fun getCustomer(): MutableIterable\u0026lt;Customer\u0026gt; { 23 log.info(\u0026#34;Getting customers!\u0026#34;) 24 return repo.findAll() 25 } 26 27 @PostMapping(\u0026#34;/customer\u0026#34;) 28 fun save(@RequestBody customer: Customer): 
Customer { 29 return repo.save(customer) 30 } 31} 1package com.demo.project03.domain 2 3import jakarta.persistence.* 4import java.time.LocalDateTime 5 6@Entity 7@Table(name = \u0026#34;customer\u0026#34;) 8data class Customer( 9 @Id 10 @GeneratedValue(strategy = GenerationType.IDENTITY) 11 val id: Long = 0, 12 val name: String, 13 val age: Int, 14 val createdAt: LocalDateTime = LocalDateTime.now(), 15) 1package com.demo.project03.repo 2 3import com.demo.project03.domain.Customer 4import org.springframework.data.jpa.repository.JpaRepository 5import org.springframework.stereotype.Repository 6 7@Repository 8interface CustomerRepository : JpaRepository\u0026lt;Customer, Long\u0026gt; Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project03 2 3Kotlin Spring Boot Rest 4 5[https://github.com/gitorko/project03](https://github.com/gitorko/project03) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14``` 15 16### Postgres DB 17 18``` 19docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:14 20docker ps 21docker exec -it pg-container psql -U postgres -W postgres 22CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 23CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 24grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 25 26docker stop pg-container 27docker start pg-container 28``` 29 30### Dev 31 32To run the backend in dev mode. 33 34```bash 35./gradlew clean build 36./gradlew bootRun 37``` 38 39### Prod 40 41To run as a single jar, both UI and backend are bundled to single uber jar. 
42 43```bash 44./gradlew bootJar 45java -jar build/libs/project03-1.0.0.jar 46``` References https://kotlinlang.org/docs/jvm-spring-boot-restful.html\n","link":"https://gitorko.github.io/post/spring-kotlin/","section":"post","tags":["spring","kotlin"],"title":"Spring Boot - Kotlin"},{"body":"","link":"https://gitorko.github.io/tags/order-matching/","section":"tags","tags":null,"title":"Order-Matching"},{"body":"","link":"https://gitorko.github.io/tags/price-time-algorithm/","section":"tags","tags":null,"title":"Price-Time-Algorithm"},{"body":"A Stock Exchange system developed with Spring Boot, Spring JPA and Angular (Clarity) frontend. Implements the price-time-priority algorithm\nGithub: https://github.com/gitorko/project100\nQuick Overview To deploy the application in a single command, clone the project, make sure no conflicting docker containers or ports are running and then run\n1git clone https://github.com/gitorko/project100 2cd project100 3docker-compose -f docker/docker-compose.yml up Open http://localhost:8080/\nRequirements Design a stock exchange system for various tickers, user can place buy and sell orders.\nFunctional Requirements Buy \u0026amp; Sell orders must be processed based on priority of time when they were placed. Priority must be given to clear order placed first. FIFO (First-In-First-Out) Order buy/sell will be whole quantity. Order can be split but buy/sell has to be complete. Can't have partial buy/sell in a single order. 
Case 1 : Order match in sequential time order (FIFO) Event Time Type Price Qty Status 10:00 AM SELL 10.0 100 COMPLETED 10:01 AM SELL 10.0 200 COMPLETED 10:02 AM SELL 10.0 300 SUBMIT 10:03 AM BUY 10.0 300 COMPLETED Case 2 : Order match in sequential time order but preference to order fulfillment Event Time Type Price Qty Status 10:00 AM SELL 10.0 100 COMPLETED 10:01 AM SELL 10.0 200 10:02 AM SELL 10.0 300 COMPLETED SUBMIT 10:03 AM BUY 10.0 400 COMPLETED Case 3 : First sell order too small, will never fulfill blocking others, so will be skipped Event Time Type Price Qty Status 10:00 AM SELL 10.0 100 10:01 AM SELL 10.0 200 COMPLETED 10:02 AM SELL 10.0 300 COMPLETED SUBMIT 10:03 AM BUY 10.0 500 Case 4 : First sell order too big, will never fulfill blocking others, so will be skipped Event Time Type Price Qty Status 10:00 AM SELL 10.0 1000 10:01 AM SELL 10.0 200 COMPLETED 10:02 AM SELL 10.0 300 COMPLETED SUBMIT 10:03 AM BUY 10.0 500 COMPLETED Case 5 : Middle sell order too big, will never fulfill blocking others, so will be skipped Event Time Type Price Qty Status 10:00 AM SELL 10.0 100 COMPLETED 10:01 AM SELL 10.0 2000 10:02 AM SELL 10.0 300 COMPLETED SUBMIT 10:03 AM BUY 10.0 400 COMPLETED Case 6 : Order match when price is different One seller wanted to sell at 9$ but we can fulfill order at 10$ as there is a buyer. So all sellers gets 10$ It's ok for seller to get above asking price but not go below the asking price.\nEvent Time Type Price Qty Status 10:00 AM SELL 10.0 100 COMPLETED 10:01 AM SELL 10.0 200 10:02 AM SELL 9.0 300 COMPLETED SUBMIT 10:03 AM BUY 10.0 400 COMPLETED Case 7 : Order match when price is different Two sellers wanted to sell at 9$ \u0026amp; 8$ but we can fulfill order at 10$ as there is a buyer. 
So all sellers gets 10$ It's ok for seller to get above asking price but not go below the asking price.\nEvent Time Type Price Qty Status 10:00 AM SELL 10.0 100 COMPLETED 10:01 AM SELL 9.0 200 10:02 AM SELL 8.0 300 COMPLETED SUBMIT 10:03 AM BUY 10.0 400 COMPLETED Case 8 : Order match when price is different There is a cheaper sell order of 8$ however due to time preference (FIFO) we complete the order with the 10$ \u0026amp; 9$\nEvent Time Type Price Qty Status 10:00 AM SELL 10.0 100 COMPLETED 10:01 AM SELL 9.0 200 COMPLETED 10:02 AM SELL 8.0 300 SUBMIT 10:03 AM BUY 10.0 300 COMPLETED Case 9 : Order can't be fulfilled Event Time Type Price Qty Status 10:00 AM SELL 10.0 100 10:01 AM SELL 10.0 200 10:02 AM SELL 10.0 300 SUBMIT 10:03 AM BUY 10.0 50 Case 10 : Sell orders at the same time \u0026amp; same price Event Time Type Price Qty Status 10:00 AM SELL 10.0 200 COMPLETED 10:00 AM SELL 10.0 100 10:00 AM SELL 10.0 200 COMPLETED SUBMIT 10:01 AM BUY 10.0 400 COMPLETED Case 11 : Sell orders at the same time \u0026amp; different price Event Time Type Price Qty Status 10:00 AM SELL 10.0 200 COMPLETED 10:00 AM SELL 9.0 100 10:00 AM SELL 8.0 200 COMPLETED SUBMIT 10:01 AM BUY 10.0 400 COMPLETED Cases for Buy order is similar/inverse of the above cases.\nCase 12 : No fractional order fulfillment Event Time Type Price Qty Status 10:00 AM SELL 10.0 200 SUBMIT 10:00 AM BUY 10.0 100 If fractional order could be fulfilled then you could look at implementing the algorithm with a Priority Queue (Min Heap \u0026amp; Max Heap). Where heap is sorted by price and then by time. In such a case the algorithm becomes simple and just insertion and deletion to heap can be done in constant time. If heaps are implemented then in the above case, if there are only 2 transactions in the entire day. Then the seller will only be able to sell 100 items out of 200. Since there cant be fractional buy/sell we will not use Priority Queue (Heaps).\nNon-Functional Requirements Latency should be low. 
System should be highly available \u0026amp; survive restarts System should scale well when number of orders increases. Should be able to distribute the service with sticky affinity for one type of ticker. Implementation Design Real world trading algorithms are more complex, involve more memory \u0026amp; cpu optimized data structures, and can handle huge volumes. Most trading systems are written in C/C++.\nThe order matching algorithm uses backtracking which is limited by the recursive stack depth. If the CombinationSum backtracking job can be further split and scheduled across different worker nodes the throughput will increase further.\nIt uses single thread per ticker and is limited by the thread pool size to support more tickers. No synchronization is required as its a single thread model per ticker\nSystem is able to match \u0026amp; process 5,000 unique orders per ticker in 2 mins that roughly 40+ matching transactions per second per ticker on a Mac Laptop\nCode 1package com.demo.project100.service; 2 3import java.time.LocalDate; 4import java.time.LocalDateTime; 5import java.util.HashMap; 6import java.util.Random; 7 8import com.demo.project100.config.MyConfig; 9import com.demo.project100.domain.OpenOrder; 10import com.demo.project100.domain.SellType; 11import com.demo.project100.domain.SettledOrder; 12import com.demo.project100.repo.OpenOrderRepository; 13import com.demo.project100.repo.SettledOrderRepository; 14import com.demo.project100.repo.SettlementSummaryRepository; 15import lombok.RequiredArgsConstructor; 16import lombok.extern.slf4j.Slf4j; 17import org.springframework.data.domain.Page; 18import org.springframework.data.domain.Pageable; 19import org.springframework.stereotype.Service; 20 21@Service 22@RequiredArgsConstructor 23@Slf4j 24public class OrderService { 25 26 private final SettledOrderRepository settledOrderRepository; 27 private final SettlementSummaryRepository settlementSummaryRepository; 28 private final OpenOrderRepository 
openOrderRepository; 29 private final EventProcessor eventProcessor; 30 private final MyConfig myConfig; 31 32 /** 33 * Save the order to db. 34 * Then queue the order for settlement i.e find a matching order to complete it. 35 */ 36 public OpenOrder placeOrder(OpenOrder orderItem, Boolean settle) { 37 orderItem.setOrderDate(LocalDateTime.now()); 38 OpenOrder savedOrder = openOrderRepository.save(orderItem); 39 if (settle) { 40 orderItem.setSettle(true); 41 } 42 eventProcessor.queueOrder(savedOrder); 43 return savedOrder; 44 } 45 46 /** 47 * Get all the active orders from the db, to load them to in-memory data structure. 48 * This can happen when system crashes and needs to restart 49 */ 50 public Page\u0026lt;OpenOrder\u0026gt; findOpenOrdersForDay(Pageable pageable) { 51 return openOrderRepository.findAllByOrderDateBetween(LocalDate.now().atStartOfDay(), LocalDate.now().plusDays(1).atStartOfDay(), pageable); 52 } 53 54 public Page\u0026lt;SettledOrder\u0026gt; findSettledOrdersForDay(Pageable pageable) { 55 return settledOrderRepository.findAllByOrderDateBetween(LocalDate.now().atStartOfDay(), LocalDate.now().plusDays(1).atStartOfDay(), pageable); 56 } 57 58 public void reset() { 59 log.info(\u0026#34;Resetting!\u0026#34;); 60 settledOrderRepository.deleteAll(); 61 openOrderRepository.deleteAll(); 62 settlementSummaryRepository.deleteAll(); 63 myConfig.setCache(new HashMap\u0026lt;\u0026gt;()); 64 } 65 66 /** 67 * Different number of buy and sell orders 68 */ 69 public void simulationRandom(int records) { 70 log.info(\u0026#34;Random Simulation for: {}!\u0026#34;, records); 71 Random random = new Random(); 72 for (int i = 0; i \u0026lt; records; i++) { 73 boolean sell = random.nextBoolean(); 74 if (sell) { 75 eventProcessor.simulationRandom(this, SellType.SELL); 76 } else { 77 eventProcessor.simulationRandom(this, SellType.BUY); 78 } 79 } 80 } 81 82 /** 83 * Simulate orders 84 */ 85 public void simulate(int records, SellType sellType) { 86 
log.info(\u0026#34;Simulate for: {}!\u0026#34;, records); 87 for (int i = 0; i \u0026lt; records; i++) { 88 eventProcessor.simulate(this, sellType); 89 } 90 } 91} 1package com.demo.project100.service; 2 3import java.time.LocalDateTime; 4import java.util.ArrayList; 5import java.util.List; 6import java.util.Map; 7import java.util.concurrent.BlockingQueue; 8import java.util.concurrent.LinkedBlockingDeque; 9import jakarta.transaction.Transactional; 10 11import com.demo.project100.domain.OpenOrder; 12import com.demo.project100.domain.SellType; 13import com.demo.project100.domain.SettledOrder; 14import com.demo.project100.domain.SettlementSummary; 15import com.demo.project100.domain.Status; 16import com.demo.project100.pojo.OrderChain; 17import com.demo.project100.pojo.OrderMap; 18import com.demo.project100.repo.OpenOrderRepository; 19import com.demo.project100.repo.SettledOrderRepository; 20import com.demo.project100.repo.SettlementSummaryRepository; 21import lombok.Data; 22import lombok.SneakyThrows; 23import lombok.extern.slf4j.Slf4j; 24import org.springframework.beans.factory.annotation.Autowired; 25 26/** 27 * No need of spring bean annotation, this is injected as a prototype spring bean 28 */ 29@Slf4j 30@Data 31public class ProcessEngine { 32 33 //Unbounded blocking queue, will take as many orders as permitted by memory. 34 private BlockingQueue\u0026lt;OpenOrder\u0026gt; orderQueue = new LinkedBlockingDeque\u0026lt;\u0026gt;(); 35 private volatile boolean running; 36 37 private String ticker; 38 private OrderMap sellMap; 39 private OrderMap buyMap; 40 41 @Autowired 42 private SettledOrderRepository settledOrderRepository; 43 44 @Autowired 45 private SettlementSummaryRepository settlementSummaryRepository; 46 47 @Autowired 48 private OpenOrderRepository openOrderRepository; 49 50 @SneakyThrows 51 public void startProcessing() { 52 //Double check locking to avoid running thread more than once. 
53 if (!running) { 54 synchronized (this) { 55 if (!running) { 56 running = true; 57 while (true) { 58 OpenOrder orderItem = orderQueue.take(); 59 log.info(\u0026#34;Processing order {}\u0026#34;, orderItem); 60 build(orderItem); 61 if (orderItem.isSettle()) { 62 //Triggers the matching process to find the relevant match order 63 boolean status = process(orderItem); 64 log.info(\u0026#34;Status of order: {}, {}\u0026#34;, orderItem.getId(), status); 65 } 66 } 67 } 68 } 69 } 70 } 71 72 public ProcessEngine(String ticker) { 73 this.ticker = ticker; 74 sellMap = new OrderMap(true); 75 buyMap = new OrderMap(); 76 } 77 78 public synchronized void reset() { 79 sellMap = new OrderMap(true); 80 buyMap = new OrderMap(); 81 } 82 83 /** 84 * Method is not synchronized as its a single thread execution model. 85 * If its multi-thread then there will be data structure corruption 86 * Single thread of execution per stock ticker to ensure order fulfillment is accurate. 87 */ 88 public void build(OpenOrder orderItem) { 89 Double key = orderItem.getPrice(); 90 if (orderItem.getType().equals(SellType.SELL)) { 91 OrderChain newNode; 92 if (sellMap.getPriceMap().containsKey(key)) { 93 //already exists 94 OrderChain currNode = sellMap.getLastNodeMap().get(key); 95 newNode = new OrderChain(orderItem, currNode, null); 96 currNode.setNext(newNode); 97 sellMap.getLastNodeMap().put(key, newNode); 98 } else { 99 //New node 100 newNode = new OrderChain(orderItem, null, null); 101 sellMap.getLastNodeMap().put(key, newNode); 102 sellMap.getPriceMap().put(key, newNode); 103 } 104 } else { 105 OrderChain newNode; 106 if (buyMap.getPriceMap().containsKey(key)) { 107 //already exists 108 OrderChain currNode = buyMap.getLastNodeMap().get(key); 109 newNode = new OrderChain(orderItem, currNode, null); 110 currNode.setNext(newNode); 111 buyMap.getLastNodeMap().put(key, newNode); 112 } else { 113 //New node 114 newNode = new OrderChain(orderItem, null, null); 115 buyMap.getLastNodeMap().put(key, 
newNode); 116 buyMap.getPriceMap().put(key, newNode); 117 } 118 } 119 } 120 121 /** 122 * Method is not synchronized as its a single thread execution model. 123 * If its multi-thread then there will be data structure corruption 124 * Single thread of execution per stock ticker to ensure order fulfillment is accurate. 125 */ 126 public boolean process(OpenOrder orderItem) { 127 if (orderItem.getType().equals(SellType.BUY)) { 128 return processOrder(orderItem, sellMap, buyMap, SellType.BUY); 129 } else { 130 return processOrder(orderItem, buyMap, sellMap, SellType.SELL); 131 } 132 } 133 134 private boolean processOrder(OpenOrder orderItem, OrderMap orderMap1, OrderMap orderMap2, SellType sellType) { 135 List\u0026lt;OrderChain\u0026gt; resultOrderChains = new ArrayList\u0026lt;\u0026gt;(); 136 if (orderMap1.getPriceMap().size() \u0026gt; 0) { 137 //Short circuit and link all nodes in one long continuous chain. 138 List\u0026lt;OrderChain\u0026gt; revertList = new ArrayList\u0026lt;\u0026gt;(); 139 140 OrderChain previous = null; 141 for (Map.Entry\u0026lt;Double, OrderChain\u0026gt; entry : orderMap1.getPriceMap().entrySet()) { 142 if (previous != null) { 143 revertList.add(previous); 144 previous.setNext(orderMap1.getPriceMap().get(entry.getKey())); 145 } 146 if (entry.getKey() \u0026lt;= orderItem.getPrice()) { 147 previous = orderMap1.getLastNodeMap().get(entry.getKey()); 148 } 149 } 150 151 //Find if order can be fulfilled 152 resultOrderChains = new CombinationSum().combinationSum(orderMap1.getPriceMap().get(orderItem.getPrice()), orderItem.getQuantity()); 153 154 //Reset the short circuiting. 
155 for (OrderChain revertItem : revertList) { 156 revertItem.setNext(null); 157 } 158 } 159 160 if (resultOrderChains.size() \u0026gt; 0) { 161 162 //Clean the Map2 163 OrderChain orderItemNode = orderMap2.getPriceMap().get(orderItem.getPrice()); 164 if (orderItemNode != null) { 165 if (orderItemNode.getPrevious() == null \u0026amp;\u0026amp; orderItemNode.getNext() == null) { 166 //If its the only node then delete the map key 167 orderMap2.getPriceMap().remove(orderItemNode.getItem().getPrice()); 168 orderMap2.getLastNodeMap().remove(orderItemNode.getItem().getPrice()); 169 } else if (orderItemNode.getPrevious() == null \u0026amp;\u0026amp; orderItemNode.getNext() != null) { 170 //If its the first node then point head to next node. 171 OrderChain newHead = orderItemNode.getNext(); 172 newHead.setPrevious(null); 173 orderItemNode.setNext(null); 174 orderMap2.getPriceMap().put(newHead.getItem().getPrice(), newHead); 175 //Set the currNode 176 orderMap2.getLastNodeMap().put(newHead.getItem().getPrice(), newHead); 177 } else if (orderItemNode.getPrevious() != null \u0026amp;\u0026amp; orderItemNode.getNext() != null) { 178 //If node in middle, break both links 179 OrderChain newNext = orderItemNode.getNext(); 180 OrderChain newPrevious = orderItemNode.getPrevious(); 181 newPrevious.setNext(newNext); 182 newNext.setPrevious(newPrevious); 183 orderItemNode.setPrevious(null); 184 orderItemNode.setNext(null); 185 } else if (orderItemNode.getPrevious() != null \u0026amp;\u0026amp; orderItemNode.getNext() == null) { 186 //Last node 187 OrderChain previousNode = orderItemNode.getPrevious(); 188 previousNode.setNext(null); 189 orderItemNode.setPrevious(null); 190 //Set the currNode 191 orderMap2.getLastNodeMap().put(previousNode.getItem().getPrice(), previousNode); 192 } 193 } 194 195 //Break the links \u0026amp; clean Map1 196 for (OrderChain orderChain : resultOrderChains) { 197 if (orderChain.getPrevious() == null \u0026amp;\u0026amp; orderChain.getNext() == null) { 198 
//If its the only node then delete the map key 199 orderMap1.getPriceMap().remove(orderChain.getItem().getPrice()); 200 orderMap1.getLastNodeMap().remove(orderChain.getItem().getPrice()); 201 } else if (orderChain.getPrevious() == null \u0026amp;\u0026amp; orderChain.getNext() != null) { 202 //If its the first node then point head to next node. 203 OrderChain newHead = orderChain.getNext(); 204 newHead.setPrevious(null); 205 orderChain.setNext(null); 206 orderMap1.getPriceMap().put(newHead.getItem().getPrice(), newHead); 207 //Set the currNode 208 orderMap1.getLastNodeMap().put(newHead.getItem().getPrice(), newHead); 209 } else if (orderChain.getPrevious() != null \u0026amp;\u0026amp; orderChain.getNext() != null) { 210 //If node in middle, break both links 211 OrderChain newNext = orderChain.getNext(); 212 OrderChain newPrevious = orderChain.getPrevious(); 213 newPrevious.setNext(newNext); 214 newNext.setPrevious(newPrevious); 215 orderChain.setPrevious(null); 216 orderChain.setNext(null); 217 } else if (orderChain.getPrevious() != null \u0026amp;\u0026amp; orderChain.getNext() == null) { 218 //Last node 219 OrderChain previousNode = orderChain.getPrevious(); 220 previousNode.setNext(null); 221 orderChain.setPrevious(null); 222 //Set the currNode 223 orderMap1.getLastNodeMap().put(previousNode.getItem().getPrice(), previousNode); 224 } 225 } 226 227 List\u0026lt;OpenOrder\u0026gt; result = new ArrayList\u0026lt;\u0026gt;(); 228 for (OrderChain orderChain : resultOrderChains) { 229 result.add(orderChain.getItem()); 230 } 231 completeOrder(orderItem, result, sellType); 232 return true; 233 } 234 return false; 235 } 236 237 @Transactional 238 public void completeOrder(OpenOrder openOrder, List\u0026lt;OpenOrder\u0026gt; resultOrders, SellType sellType) { 239 List\u0026lt;SettledOrder\u0026gt; completeItems = new ArrayList\u0026lt;\u0026gt;(); 240 List\u0026lt;SettlementSummary\u0026gt; settlementSummaries = new ArrayList\u0026lt;\u0026gt;(); 241 
List\u0026lt;Long\u0026gt; deleteOrderIds = new ArrayList\u0026lt;\u0026gt;(); 242 deleteOrderIds.add(openOrder.getId()); 243 244 SettledOrder settledOrder = SettledOrder.builder() 245 .id(openOrder.getId()) 246 .ticker(openOrder.getTicker()) 247 .price(openOrder.getPrice()) 248 .type(openOrder.getType()) 249 .quantity(openOrder.getQuantity()) 250 .orderDate(openOrder.getOrderDate()) 251 .executedDate(LocalDateTime.now()) 252 .status(Status.COMPLETED) 253 .build(); 254 completeItems.add(settledOrder); 255 256 for (OpenOrder item : resultOrders) { 257 deleteOrderIds.add(item.getId()); 258 SettledOrder localOrderItem = SettledOrder.builder() 259 .id(item.getId()) 260 .ticker(item.getTicker()) 261 .price(item.getPrice()) 262 .type(item.getType()) 263 .quantity(item.getQuantity()) 264 .orderDate(item.getOrderDate()) 265 .executedDate(LocalDateTime.now()) 266 .status(Status.COMPLETED) 267 .build(); 268 completeItems.add(localOrderItem); 269 } 270 log.debug(\u0026#34;Found Match {}\u0026#34;, completeItems); 271 272 if (settledOrder.getType().equals(SellType.BUY)) { 273 for (SettledOrder item : completeItems) { 274 if (!item.getType().equals(SellType.BUY)) { 275 //Its ok for seller to get above asking price but not go below the asking price. 276 settlementSummaries.add(SettlementSummary.builder() 277 .buyOrderId(settledOrder.getId()) 278 .sellOrderId(item.getId()) 279 .price(item.getPrice()) 280 .quantity(item.getQuantity()) 281 .sale(item.getPrice() * item.getQuantity()) 282 .build()); 283 } 284 } 285 } else { 286 for (SettledOrder item : completeItems) { 287 if (!item.getType().equals(SellType.SELL)) { 288 //Its ok for buyer to get below asking price but not go above the asking price. 
289 settlementSummaries.add(SettlementSummary.builder() 290 .buyOrderId(item.getId()) 291 .sellOrderId(settledOrder.getId()) 292 .price(settledOrder.getPrice()) 293 .quantity(item.getQuantity()) 294 .sale(settledOrder.getPrice() * item.getQuantity()) 295 .build()); 296 } 297 } 298 } 299 settledOrderRepository.saveAll(completeItems); 300 settlementSummaryRepository.saveAll(settlementSummaries); 301 openOrderRepository.deleteAllById(deleteOrderIds); 302 } 303 304} 1package com.demo.project100.service; 2 3import java.util.ArrayList; 4import java.util.Collections; 5import java.util.List; 6 7import com.demo.project100.pojo.OrderChain; 8 9public class CombinationSum { 10 List\u0026lt;OrderChain\u0026gt; result; 11 12 public List\u0026lt;OrderChain\u0026gt; combinationSum(OrderChain orderChain, int target) { 13 this.result = new ArrayList\u0026lt;\u0026gt;(); 14 backtrack(orderChain, new ArrayList\u0026lt;\u0026gt;(), target); 15 return result; 16 } 17 18 private void backtrack(OrderChain orderChain, List\u0026lt;OrderChain\u0026gt; tempList, int remain) { 19 if (remain \u0026lt; 0 || result.size() \u0026gt; 0) { 20 return; 21 } else if (remain == 0) { 22 result = new ArrayList\u0026lt;\u0026gt;(tempList); 23 } else { 24 while (orderChain != null) { 25 tempList.add(orderChain); 26 backtrack(orderChain.getNext(), tempList, remain - orderChain.getItem().getQuantity()); 27 tempList.remove(tempList.size() - 1); 28 if (result.size() \u0026gt; 0) { 29 return; 30 } 31 orderChain = orderChain.getNext(); 32 } 33 } 34 } 35} Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project 100 2 3Stock Exchange - Price Time Priority Algorithm 4 5[https://gitorko.github.io/stock-exchange/](https://gitorko.github.io/stock-exchange/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 17.0.3 2022-04-19 LTS 14 15node --version 16v16.16.0 17 18yarn --version 191.22.18 20``` 21 22### Postgres DB 23 24``` 25docker run -p 5432:5432 --name 
pg-container -e POSTGRES_PASSWORD=password -d postgres:9.6.10 26docker ps 27docker exec -it pg-container psql -U postgres -W postgres 28CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 29CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 30grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 31 32docker stop pg-container 33docker start pg-container 34``` 35 36### Dev 37 38To run the backend in dev mode. 39Postgres DB is needed to run the integration tests during build. 40 41```bash 42./gradlew clean build 43./gradlew bootRun 44``` 45 46To Run UI in dev mode 47 48```bash 49cd ui 50yarn install 51yarn build 52yarn start 53``` 54 55Open [http://localhost:4200/](http://localhost:4200/) 56 57### Prod 58 59To run as a single jar, both UI and backend are bundled to single uber jar. 60 61```bash 62./gradlew cleanBuild 63cd build/libs 64java -jar project100-1.0.0.jar 65``` 66 67Open [http://localhost:8080/](http://localhost:8080/) 68 69### Docker 70 71```bash 72./gradlew cleanBuild 73docker build -f docker/Dockerfile --force-rm -t project100:1.0.0 . 
74docker images |grep project100 75docker tag project100:1.0.0 gitorko/project100:1.0.0 76docker push gitorko/project100:1.0.0 77docker-compose -f docker/docker-compose.yml up 78``` References https://clarity.design/\nhttps://en.wikipedia.org/wiki/Order_matching_system\n","link":"https://gitorko.github.io/post/stock-exchange/","section":"post","tags":["stock-exchange","price-time-algorithm","order-matching"],"title":"Stock Exchange - Price Time Priority Algorithm"},{"body":"","link":"https://gitorko.github.io/tags/stock-exchange/","section":"tags","tags":null,"title":"Stock-Exchange"},{"body":"","link":"https://gitorko.github.io/categories/stock-exchange/","section":"categories","tags":null,"title":"Stock-Exchange"},{"body":"","link":"https://gitorko.github.io/categories/design-pattern/","section":"categories","tags":null,"title":"Design-Pattern"},{"body":"","link":"https://gitorko.github.io/tags/enterprise-integration-patterns/","section":"tags","tags":null,"title":"Enterprise-Integration-Patterns"},{"body":"Spring Integration provides a framework to support Enterprise Integration Patterns.\nGithub: https://github.com/gitorko/project97\nSpring Integration Messaging support - All communication is treated as asynchronous messages sent between different channels. This provides loose coupling. Support of external system - Adapters for ftp,file system, rabbitMQ and other external systems. A higher level of abstraction over Spring’s support for remoting, messaging, and scheduling is provided so that developers dont have to write the code to interact with these systems but focus only on the business logic. 
At any point if the source changes from a file system to an ftp server, the changes required will be very minimal.\nSpring integration provides different ways to configure:\nXML approach to wire Bean annotation approach Java DSL approach (We will focus on DSL approach as its easier to read and maintain) Terminology Inbound Adapter - Real world object -\u0026gt; Message Outbound Adapter - Message -\u0026gt; Real world object Inbound Gateway - Real world object -\u0026gt; Spring Integration -\u0026gt; Real world object Outbound Gateway - Spring Integration -\u0026gt; Real world object -\u0026gt; Spring Integration Message can be split, route, transform, wiretap, enrich, aggregate the messages.\nMessage - Wrapper that can wrap a java object, contains payload \u0026amp; headers Message Channel - A conduit for transmitting messages between producers \u0026amp; consumers a. Point-to-Point channel - one consumer should receive each message from a channel b. Publish/Subscribe channel - Will broadcast the message to any subscriber listening to that channel. Message Transform Message Filter Message Router - Determines what channel or channels (if any) should receive the message next Message Bridge - Connects two message channels or channel adapters Splitter Aggregator Handle - ServiceActivator that handle the message. Adapter endpoints - Provide one-way integration. Gateway endpoints - Provide two-way request/response integration. Types of Message Channel: PollableChannel - Store messages, You need to ask for messages by polling i.e call receive method\na. QueueChannel - Pollable, FIFO, Channel has multiple consumers, only one of them will receive, can be made blocking. b. PriorityChannel - Pollable, Messages to be ordered within the channel based on priority, Channel has multiple consumers, only one of them will receive, can be made blocking. c. 
RendezvousChannel - Pollable, Synchronous, Similar to QueueChannel but zero capacity to buffer, direct-handoff scenario, wherein a sender blocks until consumer invokes the channel’s receive() method\nSubscribableChannel - Event driven, need to subscribe to get the messages. i.e call the subscribe method.\na. Direct Channel - Subscribable, single subscriber, single thread behaviour blocking the sender thread until the message is subscribed. b. Publish Subscribe Channel - Subscribable, many subscribers, all will get the message. c. FixedSubscriberChannel - Subscribable, single subscriber, subscriber that cannot be unsubscribed\nExecutorChannel - Delegates to an instance of TaskExecutor to perform the dispatch\nFluxChannel - Allows reactive consumption\nCode 1package com.demo.project97.integration; 2 3import java.util.Arrays; 4import java.util.List; 5 6import com.demo.project97.domain.Customer; 7import lombok.extern.slf4j.Slf4j; 8import org.springframework.context.annotation.Bean; 9import org.springframework.integration.annotation.BridgeFrom; 10import org.springframework.integration.annotation.IntegrationComponentScan; 11import org.springframework.integration.channel.DirectChannel; 12import org.springframework.integration.channel.PublishSubscribeChannel; 13import org.springframework.integration.channel.QueueChannel; 14import org.springframework.integration.dsl.IntegrationFlow; 15import org.springframework.integration.dsl.Pollers; 16import org.springframework.integration.store.SimpleMessageStore; 17import org.springframework.messaging.MessageChannel; 18import org.springframework.messaging.PollableChannel; 19import org.springframework.stereotype.Component; 20 21@Slf4j 22@Component 23@IntegrationComponentScan 24public class BasicIntegration { 25 26 @Bean 27 public IntegrationFlow performSplit() { 28 return IntegrationFlow.from(\u0026#34;inputChannel1\u0026#34;) 29 .split() 30 .transform(\u0026#34;Hello \u0026#34;::concat) 31 .handle(message -\u0026gt; { 32 
log.info(\u0026#34;performSplit: {}\u0026#34;, message); 33 }) 34 .get(); 35 } 36 37 @Bean 38 public IntegrationFlow performAggregate(SimpleMessageStore messageStore) { 39 return IntegrationFlow.from(\u0026#34;inputChannel2\u0026#34;) 40 .split() 41 .aggregate(a -\u0026gt; 42 a.correlationStrategy(m -\u0026gt; { 43 Customer customer = (Customer) m.getPayload(); 44 return customer.getCity(); 45 }) 46 .releaseStrategy(g -\u0026gt; g.size() \u0026gt; 2) 47 .messageStore(messageStore)) 48 .handle(message -\u0026gt; { 49 log.info(\u0026#34;performAggregate: {}\u0026#34;, message); 50 }) 51 .get(); 52 } 53 54 @Bean 55 public IntegrationFlow performRoute() { 56 return IntegrationFlow.from(\u0026#34;inputChannel3\u0026#34;) 57 .split() 58 .log() 59 .route(Customer.class, m -\u0026gt; m.getCity(), m -\u0026gt; m 60 .channelMapping(\u0026#34;New York\u0026#34;, \u0026#34;channelA\u0026#34;) 61 .channelMapping(\u0026#34;Bangalore\u0026#34;, \u0026#34;channelB\u0026#34;)) 62 .get(); 63 } 64 65 @Bean 66 public IntegrationFlow handleNewYork() { 67 return IntegrationFlow.from(\u0026#34;channelA\u0026#34;) 68 .handle(message -\u0026gt; { 69 log.info(\u0026#34;handleNewYork: {}\u0026#34;, message); 70 }) 71 .get(); 72 } 73 74 @Bean 75 public IntegrationFlow handleBangalore() { 76 return IntegrationFlow.from(\u0026#34;channelB\u0026#34;) 77 .handle(message -\u0026gt; { 78 log.info(\u0026#34;handleBangalore: {}\u0026#34;, message); 79 }) 80 .get(); 81 } 82 83 @Bean 84 public IntegrationFlow performSubFlow(IntegrationFlow subFlowNewYork, IntegrationFlow subFlowBangalore) { 85 return IntegrationFlow.from(\u0026#34;inputChannel4\u0026#34;) 86 .split() 87 .log() 88 .route(Customer.class, m -\u0026gt; m.getCity(), m -\u0026gt; m 89 .subFlowMapping(\u0026#34;New York\u0026#34;, subFlowNewYork) 90 .subFlowMapping(\u0026#34;Bangalore\u0026#34;, subFlowBangalore)) 91 .get(); 92 } 93 94 @Bean 95 public IntegrationFlow subFlowNewYork() { 96 return f -\u0026gt; f.handle(m -\u0026gt; 
log.info(\u0026#34;subFlowNewYork: {}\u0026#34;, m)); 97 } 98 99 @Bean 100 public IntegrationFlow subFlowBangalore() { 101 return f -\u0026gt; f.handle(m -\u0026gt; log.info(\u0026#34;subFlowBangalore: {}\u0026#34;, m)); 102 } 103 104 @Bean 105 public IntegrationFlow performBridge() { 106 return IntegrationFlow.from(\u0026#34;polledChannel\u0026#34;) 107 .bridge(e -\u0026gt; e.poller(Pollers.fixedDelay(5000).maxMessagesPerPoll(10))) 108 .handle(message -\u0026gt; { 109 log.info(\u0026#34;performBridge: {}\u0026#34;, message); 110 }) 111 .get(); 112 } 113 114 @Bean 115 public IntegrationFlow readInputChannel5_sub1() { 116 return IntegrationFlow.from(\u0026#34;inputChannel5_sub1\u0026#34;) 117 .handle(message -\u0026gt; { 118 log.info(\u0026#34;readInputChannel5_sub1: {}\u0026#34;, message); 119 }) 120 .get(); 121 } 122 123 @Bean 124 public IntegrationFlow readInputChannel5_sub2() { 125 return IntegrationFlow.from(\u0026#34;inputChannel5_sub2\u0026#34;) 126 .handle(message -\u0026gt; { 127 log.info(\u0026#34;readInputChannel5_sub2: {}\u0026#34;, message); 128 }) 129 .get(); 130 } 131 132 @Bean 133 public IntegrationFlow performDynamicBridge() { 134 List\u0026lt;String\u0026gt; cities = Arrays.asList(\u0026#34;New York\u0026#34;, \u0026#34;Bangalore\u0026#34;, \u0026#34;London\u0026#34;); 135 return IntegrationFlow.from(\u0026#34;inputChannel6\u0026#34;) 136 .split() 137 .route(Customer.class, m -\u0026gt; m.getCity(), m -\u0026gt; { 138 cities.forEach(city -\u0026gt; { 139 m.subFlowMapping(city, subFlow -\u0026gt; subFlow.publishSubscribeChannel(c -\u0026gt; { 140 c.ignoreFailures(true); 141 c.subscribe(s -\u0026gt; s.handle(h -\u0026gt; { 142 Customer customer = (Customer) h.getPayload(); 143 customer.setName(customer.getName().toUpperCase()); 144 log.info(\u0026#34;Handle: {}\u0026#34;, customer); 145 })); 146 }).bridge()); 147 }); 148 }) 149 .aggregate() 150 .handle(m -\u0026gt; { 151 log.info(\u0026#34;performDynamicBridge: {}\u0026#34;, m); 152 }) 153 .get(); 
154 } 155 156 @Bean 157 public MessageChannel inputChannel1() { 158 return new DirectChannel(); 159 } 160 161 @Bean 162 public MessageChannel inputChannel2() { 163 return new DirectChannel(); 164 } 165 166 @Bean 167 public MessageChannel inputChannel3() { 168 return new DirectChannel(); 169 } 170 171 @Bean 172 public MessageChannel inputChannel4() { 173 return new DirectChannel(); 174 } 175 176 @Bean 177 public MessageChannel inputChannel5() { 178 return new PublishSubscribeChannel(); 179 } 180 181 @Bean 182 @BridgeFrom(\u0026#34;inputChannel5\u0026#34;) 183 public MessageChannel inputChannel5_sub1() { 184 return new DirectChannel(); 185 } 186 187 @Bean 188 @BridgeFrom(\u0026#34;inputChannel5\u0026#34;) 189 public MessageChannel inputChannel5_sub2() { 190 return new DirectChannel(); 191 } 192 193 @Bean 194 public MessageChannel inputChannel6() { 195 return new DirectChannel(); 196 } 197 198 @Bean 199 public PollableChannel polledChannel() { 200 return new QueueChannel(); 201 } 202 203 @Bean 204 public SimpleMessageStore messageStore() { 205 return new SimpleMessageStore(); 206 } 207 208 @Bean 209 public MessageChannel channelA() { 210 return new DirectChannel(); 211 } 212 213 @Bean 214 public MessageChannel channelB() { 215 return new DirectChannel(); 216 } 217} 1package com.demo.project97.integration; 2 3import java.io.File; 4import java.util.List; 5 6import com.demo.project97.domain.Customer; 7import lombok.RequiredArgsConstructor; 8import lombok.extern.slf4j.Slf4j; 9import org.springframework.context.annotation.Bean; 10import org.springframework.integration.channel.DirectChannel; 11import org.springframework.integration.dsl.IntegrationFlow; 12import org.springframework.integration.dsl.Pollers; 13import org.springframework.integration.file.FileNameGenerator; 14import org.springframework.integration.file.dsl.Files; 15import org.springframework.integration.file.support.FileExistsMode; 16import org.springframework.messaging.Message; 17import 
org.springframework.messaging.MessageChannel; 18import org.springframework.messaging.support.MessageBuilder; 19import org.springframework.stereotype.Component; 20 21@Component 22@RequiredArgsConstructor 23@Slf4j 24public class FileIntegration { 25 26 private final DataTransformer dataTransformer; 27 28 /** 29 * Detects if new file present in folder, checks every 5 seconds 30 * Write the file name to fileChannel1 31 */ 32 @Bean 33 public IntegrationFlow readFile(MessageChannel fileChannel1) { 34 return IntegrationFlow.from( 35 Files.inboundAdapter(new File(\u0026#34;/tmp/src\u0026#34;)) 36 .autoCreateDirectory(true) 37 .preventDuplicates(true) 38 .patternFilter(\u0026#34;*.txt\u0026#34;), 39 e -\u0026gt; e.poller(Pollers.fixedRate(5000)) 40 ) 41 .transform(dataTransformer, \u0026#34;convertFileToCustomers\u0026#34;) 42 .handle(message -\u0026gt; { 43 @SuppressWarnings(\u0026#34;unchecked\u0026#34;) 44 List\u0026lt;Customer\u0026gt; customers = (List\u0026lt;Customer\u0026gt;) message.getPayload(); 45 log.info(\u0026#34;Customers: {}\u0026#34;, customers); 46 for (Customer c : customers) { 47 fileChannel1.send(MessageBuilder.withPayload(c).build()); 48 } 49 }) 50 .get(); 51 } 52 53 @Bean 54 public IntegrationFlow readResultChannelWriteToFile() { 55 return IntegrationFlow.from(\u0026#34;fileChannel2\u0026#34;) 56 .transform(dataTransformer, \u0026#34;convertDbRecordToString\u0026#34;) 57 .handle(Files.outboundAdapter(new File(\u0026#34;/tmp/des\u0026#34;)) 58 .autoCreateDirectory(true) 59 .fileNameGenerator(fileNameGenerator()) 60 .fileExistsMode(FileExistsMode.APPEND) 61 .appendNewLine(true)) 62 .get(); 63 } 64 65 private FileNameGenerator fileNameGenerator() { 66 return new FileNameGenerator() { 67 @Override 68 public String generateFileName(Message\u0026lt;?\u0026gt; message) { 69 return message.getHeaders().get(\u0026#34;file-name\u0026#34;).toString().concat(\u0026#34;.txt\u0026#34;); 70 } 71 }; 72 } 73 74 @Bean 75 public MessageChannel fileChannel1() { 76 return 
new DirectChannel(); 77 } 78 79 @Bean 80 public MessageChannel fileChannel2() { 81 return new DirectChannel(); 82 } 83} 1package com.demo.project97.integration; 2 3import java.time.Duration; 4import java.util.concurrent.TimeUnit; 5 6import com.demo.project97.domain.Customer; 7import jakarta.persistence.EntityManagerFactory; 8import lombok.RequiredArgsConstructor; 9import lombok.SneakyThrows; 10import lombok.extern.slf4j.Slf4j; 11import org.springframework.context.annotation.Bean; 12import org.springframework.integration.channel.DirectChannel; 13import org.springframework.integration.dsl.IntegrationFlow; 14import org.springframework.integration.dsl.Pollers; 15import org.springframework.integration.jpa.dsl.Jpa; 16import org.springframework.integration.jpa.support.PersistMode; 17import org.springframework.messaging.MessageChannel; 18import org.springframework.stereotype.Component; 19 20@Component 21@RequiredArgsConstructor 22@Slf4j 23public class JPAIntegration { 24 25 private final EntityManagerFactory entityManager; 26 27 /** 28 * Continuously reads the customer table every 10 seconds 29 */ 30 @Bean 31 public IntegrationFlow readFromDbAdapter() { 32 return IntegrationFlow.from(Jpa.inboundAdapter(this.entityManager) 33 .jpaQuery(\u0026#34;from Customer where phone is not null\u0026#34;) 34 .maxResults(2) 35 .expectSingleResult(false) 36 .entityClass(Customer.class), e -\u0026gt; e.poller(Pollers.fixedDelay(Duration.ofSeconds(10)))) 37 .handle(message -\u0026gt; { 38 log.info(\u0026#34;readFromDbAdapter: {}\u0026#34;, message); 39 }) 40 .get(); 41 } 42 43 /** 44 * Starts the flow when the id for customer is pushed to dbChannel1 45 */ 46 @Bean 47 public IntegrationFlow readFromDbGateway(MessageChannel dbChannel2, MessageChannel dbChannel3) { 48 return IntegrationFlow.from(\u0026#34;dbChannel1\u0026#34;) 49 .handle(Jpa.retrievingGateway(this.entityManager) 50 .jpaQuery(\u0026#34;from Customer c where c.id = :id\u0026#34;) 51 .expectSingleResult(true) 52 
.parameterExpression(\u0026#34;id\u0026#34;, \u0026#34;payload[id]\u0026#34;)) 53 .handle(message -\u0026gt; { 54 log.info(\u0026#34;readFromDbGateway: {}\u0026#34;, message); 55 Customer payload = (Customer) message.getPayload(); 56 log.info(\u0026#34;readFromDbGateway Customer: {}\u0026#34;, payload); 57 dbChannel2.send(message); 58 dbChannel3.send(message); 59 }) 60 .get(); 61 } 62 63 /** 64 * Reads dbChannel2 and updates phone number 65 * Doesnt return anything 66 */ 67 @Bean 68 public IntegrationFlow updateDbAdapter() { 69 return IntegrationFlow.from(\u0026#34;dbChannel2\u0026#34;) 70 .handle(Jpa.outboundAdapter(this.entityManager) 71 .jpaQuery(\u0026#34;update Customer c set c.phone = \u0026#39;88888\u0026#39; where c.id =:id\u0026#34;) 72 .parameterExpression(\u0026#34;id\u0026#34;, \u0026#34;payload.id\u0026#34;), e -\u0026gt; e.transactional()) 73 .get(); 74 } 75 76 /** 77 * Reads dbChannel2 and updates phone number 78 * Doesnt return anything 79 */ 80 @Bean 81 public IntegrationFlow updateDbGateway() { 82 return IntegrationFlow.from(\u0026#34;dbChannel3\u0026#34;) 83 .handle(Jpa.updatingGateway(this.entityManager) 84 .jpaQuery(\u0026#34;update Customer c set c.name = CONCAT(\u0026#39;Mr. 
\u0026#39;,c.name) where c.id =:id\u0026#34;) 85 .parameterExpression(\u0026#34;id\u0026#34;, \u0026#34;payload.id\u0026#34;), e -\u0026gt; e.transactional()) 86 .handle(message -\u0026gt; { 87 log.info(\u0026#34;updateDbGateway: {}\u0026#34;, message); 88 }) 89 .get(); 90 } 91 92 /** 93 * Reads dbChannel3 and deletes the customer 94 */ 95 @Bean 96 public IntegrationFlow deleteRecord() { 97 return IntegrationFlow.from(Jpa.inboundAdapter(this.entityManager) 98 .jpaQuery(\u0026#34;from Customer where name like \u0026#39;Mr.%\u0026#39;\u0026#34;) 99 .maxResults(2) 100 .expectSingleResult(false) 101 .entityClass(Customer.class), e -\u0026gt; e.poller(Pollers.fixedDelay(Duration.ofSeconds(10)))) 102 .handle(Jpa.outboundAdapter(this.entityManager) 103 .persistMode(PersistMode.DELETE) 104 .parameterExpression(\u0026#34;id\u0026#34;, \u0026#34;payload.id\u0026#34;) 105 .entityClass(Customer.class), e -\u0026gt; e.transactional()) 106 .get(); 107 } 108 109 /** 110 * Reads the fileChannel1 and persists all customers 111 */ 112 @Bean 113 public IntegrationFlow readFileChannelWriteToDb() { 114 return IntegrationFlow.from(\u0026#34;fileChannel1\u0026#34;) 115 .handle(Jpa.outboundAdapter(this.entityManager) 116 .entityClass(Customer.class) 117 .persistMode(PersistMode.PERSIST), 118 e -\u0026gt; e.transactional(true)) 119 .get(); 120 } 121 122 /** 123 * Reads the fileChannel1 and persists all customers 124 */ 125 @Bean 126 public IntegrationFlow readRabbitmqChannelUpdateDb() { 127 return IntegrationFlow.from(\u0026#34;rabbitmqChannel1\u0026#34;) 128 .handle(Jpa.outboundAdapter(this.entityManager) 129 .jpaQuery(\u0026#34;update Customer c set c.phone = :phone where c.name =:name\u0026#34;) 130 .parameterExpression(\u0026#34;phone\u0026#34;, \u0026#34;payload.phone\u0026#34;) 131 .parameterExpression(\u0026#34;name\u0026#34;, \u0026#34;payload.name\u0026#34;) 132 , e -\u0026gt; e.transactional()) 133 .get(); 134 } 135 136 @SneakyThrows 137 public void sleep(int seconds) { 138 
TimeUnit.SECONDS.sleep(seconds); 139 } 140 141 @Bean 142 public MessageChannel dbChannel1() { 143 return new DirectChannel(); 144 } 145 146 @Bean 147 public MessageChannel dbChannel2() { 148 return new DirectChannel(); 149 } 150 151 @Bean 152 public MessageChannel dbChannel3() { 153 return new DirectChannel(); 154 } 155 156 157} 1package com.demo.project97.integration; 2 3import com.demo.project97.domain.Customer; 4import lombok.RequiredArgsConstructor; 5import lombok.extern.slf4j.Slf4j; 6import org.springframework.amqp.AmqpRejectAndDontRequeueException; 7import org.springframework.amqp.core.Queue; 8import org.springframework.amqp.rabbit.connection.ConnectionFactory; 9import org.springframework.context.annotation.Bean; 10import org.springframework.core.AttributeAccessor; 11import org.springframework.integration.amqp.dsl.Amqp; 12import org.springframework.integration.channel.DirectChannel; 13import org.springframework.integration.dsl.IntegrationFlow; 14import org.springframework.integration.support.ErrorMessageStrategy; 15import org.springframework.messaging.MessageChannel; 16import org.springframework.messaging.support.ErrorMessage; 17import org.springframework.stereotype.Component; 18 19@Component 20@RequiredArgsConstructor 21@Slf4j 22public class RabbitMQIntegration { 23 24 private final ConnectionFactory connectionFactory; 25 private final DataTransformer dataTransformer; 26 27 @Bean 28 public IntegrationFlow readFromQueue(MessageChannel rabbitmqChannel1) { 29 return IntegrationFlow.from(Amqp.inboundAdapter(connectionFactory, \u0026#34;phone-queue\u0026#34;) 30 .errorChannel(\u0026#34;errorChannel\u0026#34;) 31 .errorMessageStrategy(new RabbitMQIntegration.MyFatalExceptionStrategy())) 32 .transform(dataTransformer, \u0026#34;convertQueuePayloadToCustomer\u0026#34;) 33 .handle(message -\u0026gt; { 34 log.info(\u0026#34;readFromQueue: {}\u0026#34;, message); 35 rabbitmqChannel1.send(message); 36 }) 37 .get(); 38 } 39 40 public static class MyFatalExceptionStrategy 
implements ErrorMessageStrategy { 41 @Override 42 public ErrorMessage buildErrorMessage(Throwable payload, AttributeAccessor attributes) { 43 throw new AmqpRejectAndDontRequeueException(\u0026#34;Error In Message!\u0026#34;); 44 } 45 } 46 47 @Bean 48 public MessageChannel rabbitmqChannel1() { 49 DirectChannel channel = new DirectChannel(); 50 channel.setDatatypes(Customer.class); 51 return channel; 52 } 53 54 @Bean 55 public Queue inboundQueue() { 56 return new Queue(\u0026#34;phone-queue\u0026#34;, true, false, false); 57 } 58} Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project97 2 3Spring Integration 4 5[https://gitorko.github.io/spring-integration-basics/](https://gitorko.github.io/spring-integration-basics/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 17.0.3 2022-04-19 LTS 14``` 15 16### RabbitMQ 17 18Run the docker command to start a rabbitmq instance 19 20```bash 21docker run -d --hostname my-rabbit -p 8080:15672 -p 5672:5672 rabbitmq:3-management 22``` 23 24Login to rabbitmq console [http://localhost:8080](http://localhost:8080) 25 26``` 27username:guest 28password: guest 29``` 30 31### Postgres DB 32 33``` 34docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:9.6.10 35docker ps 36docker exec -it pg-container psql -U postgres -W postgres 37CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 38CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 39grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 40 41docker stop pg-container 42docker start pg-container 43``` 44 45### Dev 46 47To run the code. 
48 49```bash 50./gradlew clean build 51./gradlew bootRun 52``` References https://spring.io/projects/spring-integration\nhttps://www.enterpriseintegrationpatterns.com/\n","link":"https://gitorko.github.io/post/spring-integration-basics/","section":"post","tags":["Enterprise-Integration-Patterns"],"title":"Spring Integration - Basics"},{"body":"","link":"https://gitorko.github.io/categories/spring-integration/","section":"categories","tags":null,"title":"Spring-Integration"},{"body":"","link":"https://gitorko.github.io/tags/graphql/","section":"tags","tags":null,"title":"Graphql"},{"body":"","link":"https://gitorko.github.io/categories/graphql/","section":"categories","tags":null,"title":"GraphQL"},{"body":"","link":"https://gitorko.github.io/tags/pagination/","section":"tags","tags":null,"title":"Pagination"},{"body":"GraphQL is a query language that offers an alternative model to developing APIs instead of REST, SOAP or gRPC. It allows partial fetch of data, you can use a single endpoint to fetch different formats of data.\nGithub: https://github.com/gitorko/project96\nSpring Boot GraphQL Lets say you have a rest api that returns customer profile, the customer profile has 200+ fields, so a mobile device may not need all the fields, it may need may be 5 fields like name, address etc. Requesting a big payload over wire is costly. So now you end up writing a rest endpoint that returns just the 5 fields. This can become overwhelming when the requirements increase and you end up creating different endpoint for such requirement. In GraphQL you define a schema and let the user/consumer decide which fields they want to fetch.\nBefore GraphQL 1.0 was Released spring had to extend the classes GraphQLMutationResolver, GraphQLQueryResolver. 
Its no longer required.\nGraphQLMutationResolver -\u0026gt; @MutationMapping\nGraphQLQueryResolver -\u0026gt; @QueryMapping\nThe code uses Extended Scalars for graphql-java to support Date and other type objects in GraphQL The code shows how pagination can be done in GraphQL\nCode 1package com.demo.project96.controller; 2 3import java.util.Optional; 4 5import com.demo.project96.domain.Comment; 6import com.demo.project96.domain.Post; 7import com.demo.project96.domain.PostPage; 8import com.demo.project96.repo.CommentRepository; 9import com.demo.project96.repo.PostRepository; 10import lombok.RequiredArgsConstructor; 11import lombok.extern.slf4j.Slf4j; 12import org.springframework.data.domain.Page; 13import org.springframework.data.domain.PageRequest; 14import org.springframework.graphql.data.method.annotation.Argument; 15import org.springframework.graphql.data.method.annotation.QueryMapping; 16import org.springframework.graphql.data.method.annotation.SchemaMapping; 17import org.springframework.stereotype.Controller; 18 19@Controller 20@Slf4j 21@RequiredArgsConstructor 22public class QueryController { 23 24 private final PostRepository postRepository; 25 private final CommentRepository commentRepository; 26 27 @QueryMapping 28 public Iterable\u0026lt;Post\u0026gt; findAllPosts() { 29 return postRepository.findAll(); 30 } 31 32 @QueryMapping 33 public PostPage findAllPostsPage(@Argument Integer page, @Argument Integer size) { 34 PageRequest pageOf = PageRequest.of(page, size); 35 Page\u0026lt;Post\u0026gt; all = postRepository.findAll(pageOf); 36 return PostPage.builder() 37 .posts(all.getContent()) 38 .totalElements(all.getTotalElements()) 39 .totalPages(all.getTotalPages()) 40 .currentPage(all.getNumber()) 41 .size(all.getNumberOfElements()) 42 .build(); 43 } 44 45 @QueryMapping 46 public Optional\u0026lt;Post\u0026gt; findPostById(@Argument(\u0026#34;id\u0026#34;) Long id) { 47 return postRepository.findById(id); 48 } 49 50 @QueryMapping 51 public 
Iterable\u0026lt;Comment\u0026gt; findAllComments() { 52 //Will cause N+1 problem 53 //return commentRepository.findAll(); 54 return commentRepository.findAllComments(); 55 } 56 57 @QueryMapping 58 public Optional\u0026lt;Comment\u0026gt; findCommentById(@Argument(\u0026#34;id\u0026#34;) Long id) { 59 return commentRepository.findById(id); 60 } 61 62 @QueryMapping 63 public long countPosts() { 64 return postRepository.count(); 65 } 66 67 @QueryMapping 68 public Iterable\u0026lt;Comment\u0026gt; findCommentsByPostId(@Argument(\u0026#34;postId\u0026#34;) Long postId) { 69 Optional\u0026lt;Post\u0026gt; byId = postRepository.findById(postId); 70 if (byId.isPresent()) { 71 return commentRepository.findByPost(byId.get()); 72 } else { 73 throw new RuntimeException(\u0026#34;Post not found!\u0026#34;); 74 } 75 } 76 77 /** 78 * Functionality will work same without this method as well. 79 * Hibernate Lazy fetch prevents the post entity from being fetched even without this method. 80 * So no unnecessary db call is made if post entity is not needed in the response even without this method. 81 * However if there is any reason why we want to control a single field explicitly we can use this approach and define how that field gets data. 
82 * eg: You want to sort the comments 83 */ 84 @SchemaMapping(typeName = \u0026#34;Comment\u0026#34;, field = \u0026#34;post\u0026#34;) 85 public Post getPost(Comment comment) { 86 return postRepository.findById(comment.getPost().getId()).orElseThrow(null); 87 } 88 89} 1package com.demo.project96.controller; 2 3import java.time.ZonedDateTime; 4import java.util.Optional; 5 6import com.demo.project96.domain.Comment; 7import com.demo.project96.domain.Post; 8import com.demo.project96.repo.CommentRepository; 9import com.demo.project96.repo.PostRepository; 10import lombok.RequiredArgsConstructor; 11import lombok.extern.slf4j.Slf4j; 12import org.springframework.graphql.data.method.annotation.Argument; 13import org.springframework.graphql.data.method.annotation.MutationMapping; 14import org.springframework.stereotype.Controller; 15 16@Controller 17@Slf4j 18@RequiredArgsConstructor 19public class MutationController { 20 21 private final PostRepository postRepository; 22 private final CommentRepository commentRepository; 23 24 @MutationMapping 25 public Post createPost(@Argument(\u0026#34;header\u0026#34;) String header, @Argument(\u0026#34;createdBy\u0026#34;) String createdBy) { 26 Post post = new Post(); 27 post.setHeader(header); 28 post.setCreatedBy(createdBy); 29 post.setCreatedDt(ZonedDateTime.now()); 30 post = postRepository.save(post); 31 return post; 32 } 33 34 @MutationMapping 35 public Comment createComment(@Argument(\u0026#34;message\u0026#34;) String message, @Argument(\u0026#34;createdBy\u0026#34;) String createdBy, @Argument(\u0026#34;postId\u0026#34;) Long postId) { 36 Comment comment = new Comment(); 37 Optional\u0026lt;Post\u0026gt; byId = postRepository.findById(postId); 38 if (byId.isPresent()) { 39 Post post = byId.get(); 40 comment.setPost(post); 41 comment.setMessage(message); 42 comment.setCreatedBy(createdBy); 43 comment.setCreatedDt(ZonedDateTime.now()); 44 comment = commentRepository.save(comment); 45 return comment; 46 } else { 47 throw new 
RuntimeException(\u0026#34;Post not found!\u0026#34;); 48 } 49 50 } 51 52 @MutationMapping 53 public boolean deleteComment(@Argument(\u0026#34;id\u0026#34;) Long id) { 54 commentRepository.deleteById(id); 55 return true; 56 } 57 58 @MutationMapping 59 public Comment updateComment(@Argument(\u0026#34;id\u0026#34;) Long id, @Argument(\u0026#34;message\u0026#34;) String message) { 60 Optional\u0026lt;Comment\u0026gt; byId = commentRepository.findById(id); 61 if (byId.isPresent()) { 62 Comment comment = byId.get(); 63 comment.setMessage(message); 64 commentRepository.save(comment); 65 return comment; 66 } 67 throw new RuntimeException(\u0026#34;Post not found!\u0026#34;); 68 } 69} The schema for GraphQL. The ! simply tells us that you can always expect a value back and will never need to check for null.\n1scalar DateTime 2 3type Post { 4 id: ID! 5 header: String! 6 createdDt: DateTime! 7 createdBy: String! 8} 9 10type PostPage { 11 posts: [Post] 12 totalElements: Int 13 totalPages: Int 14 currentPage: Int 15 size: Int 16} 17 18type Query { 19 findAllPosts: [Post] 20 findPostById(id: ID!): Post 21 countPosts: Int! 22 findAllPostsPage(page: Int = 0, size: Int = 20): PostPage 23} 24 25type Mutation { 26 createPost(header: String!, createdBy: String!): Post 27} GraphQL accepts only one root Query and one root Mutation types, To keep the logic in different files we extend the Query and Mutation types.\n1type Comment { 2 id: ID! 3 message: String! 4 createdBy: String! 5 createdDt: DateTime! 6 post: Post 7} 8 9extend type Query { 10 findAllComments: [Comment]! 11 findCommentById(id: ID!): Comment! 12 findCommentsByPostId(postId: ID!): [Comment] 13} 14 15extend type Mutation { 16 createComment(message: String!, createdBy: String!, postId: ID!): Comment! 17 updateComment(id: ID!, message: String!): Comment! 
18 deleteComment(id: ID!): Boolean 19} The key terminologies in GraphQL are\nQuery: Used to read data Mutation: Used to create, update and delete data Subscription: Similar to a query allowing you to fetch data from the server. Subscriptions offer a long-lasting operation that can change their result over time. Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project 96 2 3Spring Boot \u0026amp; GraphQL 4 5[https://gitorko.github.io/spring-graphql/](https://gitorko.github.io/spring-graphql/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14 15node --version 16v16.16.0 17 18yarn --version 191.22.18 20``` 21 22### Postgres DB 23 24``` 25docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:9.6.10 26docker ps 27docker exec -it pg-container psql -U postgres -W postgres 28CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 29CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 30grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 31 32docker stop pg-container 33docker start pg-container 34``` 35 36### Dev 37 38To run the backend in dev mode. 39Postgres DB is needed to run the integration tests during build. 40 41```bash 42./gradlew clean build 43./gradlew bootRun 44``` 45 46### Prod 47 48To run as a single jar. 49 50```bash 51./gradlew bootJar 52cd project96/build/libs 53java -jar project96-1.0.0.jar 54``` 55 56### Graph IQL 57 58GraphQL comes with a browser client to test the Query. 
This can be enabled in properties 59 60```yaml 61graphql.graphiql.enabled: true 62``` 63 64Open [http://localhost:8080/graphiql](http://localhost:8080/graphiql) 65 66### Postman 67 68Import the postman collection to postman 69 70[Postman Collection](https://github.com/gitorko/project96/blob/main/postman/Project96.postman_collection.json) References https://spring.io/projects/spring-graphql\nhttps://github.com/graphql-java/graphql-java-extended-scalars\nhttps://www.graphql-java.com/tutorials/getting-started-with-spring-boot/\nhttps://spring.io/blog/2022/05/19/spring-for-graphql-1-0-release\n","link":"https://gitorko.github.io/post/spring-graphql/","section":"post","tags":["spring","spring-boot","graphql","pagination"],"title":"Spring - GraphQL"},{"body":"Learn morse code in under 10 mins with pictures\nReference Sheet Story A - Alligator, Has a head and a tail [dot dash]\nB - Bear, Goldilocks \u0026amp; 3 bears, Goldilock is big and 3 small bears [dash dot dot dot]\nC - Caterpillar, 2 Caterpillars following each other [dash dot dash dot]\nD - Dog, A big dog chasing 2 small cats [dash dot dot]\nE - Eye, A monster with one eye [dot]\nF - Frog, Two frogs jump in a pond, one comes out [dot dot dash dot]\nG - Giraffe, Giraffe family, dad,mom,baby [dash dash dot]\nH - Horse, 4 Horses in a race [dot dot dot dot]\nI - Ice-Cream, 2 cherries on an ice-cream [dot dot]\nJ - Joker, Joker juggling 3 big hats [dot dash dash dash]\nK - Karate, Looks like a karate belt [dash dot dash]\nL - Lorry, A wheel in the front then body \u0026amp; then two wheels at back [dot dash dot dot]\nM - Monkey, 2 monkeys hanging on 2 branches [dash dash]\nN - Naughty, 1 Naughty caterpillar [dash dot]\nO - Owl, 3 owls sitting on 3 branches [dash dash dash]\nP - Parrot, 2 angry parrots not talking to each other [dot dash dash dot]\nQ - Queen, 2 Queen love the king [dash dash dot dash]\nR - Racing car, Wheel then body \u0026amp; then wheel [dot dash dot]\nS - Snake, 3 snakes in a hole [dot dot dot]\nT - 
Tea, One cup of tea [dash]\nU - Universe, In another universe 2 cats chase a dog [dot dot dash]\nV - Violin, 3 small violin but only 1 big bow stick [dot dot dot dash]\nW - Whale, Blow hole in head then big body of whale [dot dash dash]\nX - GoGru, Looks like GoGru Ear,Eye,Eye,Ear [dash dot dot dash]\nY - Yoda, Looks like Yoda's light saber hilt,button,light,light [dash dot dash dash]\nZ - Zebra, 2 zebra fall in 2 black holes [dash dash dot dot]\nOnce you know the picture association you will never forget morse code! To get better at it Practice!\n","link":"https://gitorko.github.io/post/learn-morse-code/","section":"post","tags":["morse-code"],"title":"Learn Morse Code"},{"body":"","link":"https://gitorko.github.io/tags/morse-code/","section":"tags","tags":null,"title":"Morse-Code"},{"body":"","link":"https://gitorko.github.io/categories/morse-code/","section":"categories","tags":null,"title":"Morse-Code"},{"body":"","link":"https://gitorko.github.io/categories/rabbitmq/","section":"categories","tags":null,"title":"RabbitMQ"},{"body":"RabbitMQ Stream implementation.\nGithub: https://github.com/gitorko/project74\nRabbitMQ Streams Streams implement append-only log, messages are persistent and replicated.\nLarge fan-outs - Deliver the same message to multiple subscribers Replay / Time-travelling - Read messages from any point. Throughput Performance - Log based messaging deliver performance compared to traditional queues. Large logs - Streams are designed to store larger amounts of data in an efficient manner with minimal in-memory overhead. 
Code 1package com.demo.project74; 2 3import java.nio.charset.StandardCharsets; 4import java.time.Duration; 5import java.util.concurrent.CountDownLatch; 6import java.util.concurrent.TimeUnit; 7import java.util.stream.IntStream; 8 9import com.rabbitmq.stream.ByteCapacity; 10import com.rabbitmq.stream.Consumer; 11import com.rabbitmq.stream.Environment; 12import com.rabbitmq.stream.Message; 13import com.rabbitmq.stream.OffsetSpecification; 14import com.rabbitmq.stream.Producer; 15import lombok.SneakyThrows; 16import lombok.extern.slf4j.Slf4j; 17import org.springframework.scheduling.annotation.Async; 18import org.springframework.scheduling.annotation.EnableAsync; 19import org.springframework.stereotype.Service; 20 21@EnableAsync 22@Service 23@Slf4j 24public class AsyncService { 25 26 private static final int MESSAGE_COUNT = 10; 27 private static final String STREAM_NAME = \u0026#34;my-stream\u0026#34;; 28 29 @SneakyThrows 30 @Async 31 public void producer() { 32 log.info(\u0026#34;Starting producer!\u0026#34;); 33 try (Environment environment = Environment.builder().uri(\u0026#34;rabbitmq-stream://localhost:5552\u0026#34;).build()) { 34 environment.streamCreator() 35 .stream(STREAM_NAME) 36 .maxAge(Duration.ofHours(6)) 37 .maxSegmentSizeBytes(ByteCapacity.MB(500)) 38 .create(); 39 Producer producer = environment 40 .producerBuilder() 41 .stream(STREAM_NAME) 42 .build(); 43 44 CountDownLatch confirmLatch = new CountDownLatch(MESSAGE_COUNT); 45 IntStream.range(0, MESSAGE_COUNT).forEach(i -\u0026gt; { 46 Message message = producer.messageBuilder() 47 .properties() 48 .creationTime(System.currentTimeMillis()) 49 .messageId(i) 50 .messageBuilder() 51 .addData((\u0026#34;customer_\u0026#34; + i).getBytes(StandardCharsets.UTF_8)) 52 .build(); 53 producer.send(message, confirmationStatus -\u0026gt; confirmLatch.countDown()); 54 log.info(\u0026#34;Published: {}\u0026#34;, message.getBody()); 55 try { 56 TimeUnit.SECONDS.sleep(1); 57 } catch (InterruptedException e) { 58 throw 
new RuntimeException(e); 59 } 60 }); 61 boolean done = confirmLatch.await(1, TimeUnit.MINUTES); 62 log.info(\u0026#34;Completed send: {}\u0026#34;, done); 63 //environment.deleteStream(STREAM_NAME); 64 } 65 } 66 67 @SneakyThrows 68 @Async 69 public void consumer() { 70 log.info(\u0026#34;Starting consumer!\u0026#34;); 71 TimeUnit.SECONDS.sleep(2); 72 try (Environment environment = Environment.builder().uri(\u0026#34;rabbitmq-stream://localhost:5552\u0026#34;).build()) { 73 Consumer consumer = environment.consumerBuilder() 74 .stream(STREAM_NAME) 75 .offset(OffsetSpecification.last()) 76 .messageHandler((context, message) -\u0026gt; { 77 log.info(\u0026#34;Consumed: {}\u0026#34;, message.getBody()); 78 }) 79 .build(); 80 //Don\u0026#39;t let the thread end. 81 CountDownLatch finishLatch = new CountDownLatch(1); 82 finishLatch.await(); 83 } 84 } 85} Setup 1# Project 74 2 3RabbitMQ Stream 4 5[https://gitorko.github.io/rabbitmq-stream/](https://gitorko.github.io/rabbitmq-stream/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14``` 15 16### RabbitMQ 17 18Run the docker command to start a rabbitmq instance 19 20```bash 21docker run -it --hostname my-rabbit --rm --name my-rabbit -e RABBITMQ_DEFAULT_USER=guest \\ 22-e RABBITMQ_DEFAULT_PASS=guest -e RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=\u0026#39;-rabbitmq_stream advertised_host localhost\u0026#39; \\ 23-p 8080:15672 -p 5672:5672 -p 5552:5552 rabbitmq:3-management 24``` 25 26```bash 27docker exec my-rabbit rabbitmq-plugins enable rabbitmq_stream 28``` 29 30Open the rabbitmq console 31 32[http://localhost:8080](http://localhost:8080) 33 34``` 35user:guest 36pwd: guest 37``` 38 39### Dev 40 41```bash 42./gradlew bootRun 43``` References https://www.rabbitmq.com/streams.html\n","link":"https://gitorko.github.io/post/rabbitmq-stream/","section":"post","tags":["streams"],"title":"RabbitMQ 
Streams"},{"body":"","link":"https://gitorko.github.io/tags/streams/","section":"tags","tags":null,"title":"Streams"},{"body":"","link":"https://gitorko.github.io/categories/kubernetes/","section":"categories","tags":null,"title":"Kubernetes"},{"body":"Application deployed on kubernetes, configured with Traefik ingress controller to rate limit.\nGithub: https://github.com/gitorko/project95\nTraefik Traefik is a reverse proxy and load balancer that makes deploying microservices easy.\nWe will deploy the spring rest application along with postgres db on kubernetes instance. Then we will configure Traefik as ingress controller and apply rate limit on it using Traefik Proxy Middleware. We will use docker desktop kubernetes instance.\nRate limiting is a technique for controlling the rate of requests to your application. It can save you from Denial-of-Service (DoS) or resource starvation problems. Without rate limits, a burst of traffic could bring down the whole service making it unavailable for everybody.\nCode 1apiVersion: apps/v1 2kind: Deployment 3metadata: 4 name: project95 5spec: 6 selector: 7 matchLabels: 8 app: project95 9 strategy: 10 rollingUpdate: 11 maxSurge: 1 12 maxUnavailable: 1 13 type: RollingUpdate 14 replicas: 1 15 template: 16 metadata: 17 labels: 18 app: project95 19 spec: 20 containers: 21 - name: project95 22 image: project95:1.0.0 23 imagePullPolicy: IfNotPresent 24 ports: 25 - containerPort: 8080 26 resources: 27 limits: 28 cpu: \u0026#34;1\u0026#34; 29 memory: \u0026#34;500Mi\u0026#34; 30 31--- 32apiVersion: v1 33kind: ConfigMap 34metadata: 35 name: postgres-config 36 labels: 37 app: postgres 38data: 39 POSTGRES_DB: test-db 40 POSTGRES_USER: test 41 POSTGRES_PASSWORD: test@123 42--- 43apiVersion: v1 44kind: PersistentVolume 45metadata: 46 name: postgres-pv-volume 47 labels: 48 type: local 49 app: postgres 50spec: 51 storageClassName: manual 52 capacity: 53 storage: 5Gi 54 accessModes: 55 - ReadWriteMany 56 hostPath: 57 path: 
\u0026#34;/tmp/data\u0026#34; 58--- 59apiVersion: v1 60kind: PersistentVolumeClaim 61metadata: 62 name: postgres-pv-claim 63 labels: 64 app: postgres 65spec: 66 storageClassName: manual 67 accessModes: 68 - ReadWriteMany 69 resources: 70 requests: 71 storage: 5Gi 72--- 73apiVersion: apps/v1 74kind: Deployment 75metadata: 76 name: db-server 77spec: 78 replicas: 1 79 template: 80 metadata: 81 labels: 82 app: db-server 83 spec: 84 containers: 85 - name: db-server 86 image: postgres:9.6.10 87 imagePullPolicy: \u0026#34;IfNotPresent\u0026#34; 88 ports: 89 - containerPort: 5432 90 envFrom: 91 - configMapRef: 92 name: postgres-config 93 volumeMounts: 94 - mountPath: /var/lib/postgresql/data 95 name: postgredb 96 volumes: 97 - name: postgredb 98 persistentVolumeClaim: 99 claimName: postgres-pv-claim 100 selector: 101 matchLabels: 102 app: db-server 103--- 104apiVersion: v1 105kind: Service 106metadata: 107 name: db-server 108 labels: 109 app: db-server 110spec: 111 type: NodePort 112 ports: 113 - port: 5432 114 selector: 115 app: db-server 116--- 117kind: Service 118apiVersion: v1 119metadata: 120 name: project95 121spec: 122 ports: 123 - port: 8080 124 targetPort: 8080 125 name: http 126 selector: 127 app: project95 128 type: LoadBalancer 1apiVersion: apps/v1 2kind: Deployment 3metadata: 4 name: project95 5 labels: 6 app: project95 7spec: 8 selector: 9 matchLabels: 10 app: project95 11 strategy: 12 rollingUpdate: 13 maxSurge: 1 14 maxUnavailable: 1 15 type: RollingUpdate 16 replicas: 1 17 template: 18 metadata: 19 labels: 20 app: project95 21 spec: 22 containers: 23 - name: project95 24 image: gitorko/project95:1.0.0 25 imagePullPolicy: IfNotPresent 26 ports: 27 - containerPort: 8080 28 resources: 29 limits: 30 cpu: \u0026#34;1\u0026#34; 31 memory: \u0026#34;500Mi\u0026#34; 32 33--- 34apiVersion: v1 35kind: ConfigMap 36metadata: 37 name: postgres-config 38 labels: 39 app: postgres 40data: 41 POSTGRES_DB: test-db 42 POSTGRES_USER: test 43 POSTGRES_PASSWORD: test@123 44--- 
45apiVersion: v1 46kind: PersistentVolume 47metadata: 48 name: postgres-pv-volume 49 labels: 50 type: local 51 app: postgres 52spec: 53 storageClassName: manual 54 capacity: 55 storage: 5Gi 56 accessModes: 57 - ReadWriteMany 58 hostPath: 59 path: \u0026#34;/tmp/data\u0026#34; 60--- 61apiVersion: v1 62kind: PersistentVolumeClaim 63metadata: 64 name: postgres-pv-claim 65 labels: 66 app: postgres 67spec: 68 storageClassName: manual 69 accessModes: 70 - ReadWriteMany 71 resources: 72 requests: 73 storage: 5Gi 74--- 75apiVersion: apps/v1 76kind: Deployment 77metadata: 78 name: db-server 79spec: 80 replicas: 1 81 template: 82 metadata: 83 labels: 84 app: db-server 85 spec: 86 containers: 87 - name: db-server 88 image: postgres:9.6.10 89 imagePullPolicy: \u0026#34;IfNotPresent\u0026#34; 90 ports: 91 - containerPort: 5432 92 envFrom: 93 - configMapRef: 94 name: postgres-config 95 volumeMounts: 96 - mountPath: /var/lib/postgresql/data 97 name: postgredb 98 volumes: 99 - name: postgredb 100 persistentVolumeClaim: 101 claimName: postgres-pv-claim 102 selector: 103 matchLabels: 104 app: db-server 105--- 106apiVersion: v1 107kind: Service 108metadata: 109 name: db-server 110 labels: 111 app: db-server 112spec: 113 type: NodePort 114 ports: 115 - port: 5432 116 selector: 117 app: db-server 118--- 119apiVersion: v1 120kind: Service 121metadata: 122 name: project95 123 labels: 124 app: project95 125spec: 126 type: ClusterIP 127 ports: 128 - port: 8080 129 selector: 130 app: project95 131--- 132apiVersion: networking.k8s.io/v1 133kind: Ingress 134metadata: 135 name: my-ingress 136 annotations: 137 kubernetes.io/ingress.class: \u0026#34;traefik\u0026#34; 138spec: 139 rules: 140 - host: localhost.com 141 http: 142 paths: 143 - path: /rest 144 pathType: Prefix 145 backend: 146 service: 147 name: project95 148 port: 149 number: 8080 150--- 1apiVersion: apps/v1 2kind: Deployment 3metadata: 4 name: project95 5 labels: 6 app: project95 7spec: 8 selector: 9 matchLabels: 10 app: project95 
11 strategy: 12 rollingUpdate: 13 maxSurge: 1 14 maxUnavailable: 1 15 type: RollingUpdate 16 replicas: 1 17 template: 18 metadata: 19 labels: 20 app: project95 21 spec: 22 containers: 23 - name: project95 24 image: gitorko/project95:1.0.0 25 imagePullPolicy: IfNotPresent 26 ports: 27 - containerPort: 8080 28 resources: 29 limits: 30 cpu: \u0026#34;1\u0026#34; 31 memory: \u0026#34;500Mi\u0026#34; 32 33--- 34apiVersion: v1 35kind: ConfigMap 36metadata: 37 name: postgres-config 38 labels: 39 app: postgres 40data: 41 POSTGRES_DB: test-db 42 POSTGRES_USER: test 43 POSTGRES_PASSWORD: test@123 44--- 45apiVersion: v1 46kind: PersistentVolume 47metadata: 48 name: postgres-pv-volume 49 labels: 50 type: local 51 app: postgres 52spec: 53 storageClassName: manual 54 capacity: 55 storage: 5Gi 56 accessModes: 57 - ReadWriteMany 58 hostPath: 59 path: \u0026#34;/tmp/data\u0026#34; 60--- 61apiVersion: v1 62kind: PersistentVolumeClaim 63metadata: 64 name: postgres-pv-claim 65 labels: 66 app: postgres 67spec: 68 storageClassName: manual 69 accessModes: 70 - ReadWriteMany 71 resources: 72 requests: 73 storage: 5Gi 74--- 75apiVersion: apps/v1 76kind: Deployment 77metadata: 78 name: db-server 79spec: 80 replicas: 1 81 template: 82 metadata: 83 labels: 84 app: db-server 85 spec: 86 containers: 87 - name: db-server 88 image: postgres:9.6.10 89 imagePullPolicy: \u0026#34;IfNotPresent\u0026#34; 90 ports: 91 - containerPort: 5432 92 envFrom: 93 - configMapRef: 94 name: postgres-config 95 volumeMounts: 96 - mountPath: /var/lib/postgresql/data 97 name: postgredb 98 volumes: 99 - name: postgredb 100 persistentVolumeClaim: 101 claimName: postgres-pv-claim 102 selector: 103 matchLabels: 104 app: db-server 105--- 106apiVersion: v1 107kind: Service 108metadata: 109 name: db-server 110 labels: 111 app: db-server 112spec: 113 type: NodePort 114 ports: 115 - port: 5432 116 selector: 117 app: db-server 118--- 119apiVersion: v1 120kind: Service 121metadata: 122 name: project95 123 labels: 124 app: 
project95 125spec: 126 type: ClusterIP 127 ports: 128 - port: 8080 129 selector: 130 app: project95 131--- 132apiVersion: traefik.containo.us/v1alpha1 133kind: Middleware 134metadata: 135 name: ratelimiter 136spec: 137 rateLimit: 138 average: 3 139 burst: 5 140--- 141apiVersion: traefik.containo.us/v1alpha1 142kind: IngressRoute 143metadata: 144 name: myingressroute 145spec: 146 entryPoints: 147 - web 148 routes: 149 - match: Host(`localhost.com`) \u0026amp;\u0026amp; PathPrefix(`/rest`) 150 kind: Rule 151 services: 152 - kind: Service 153 name: project95 154 port: 8080 155 middlewares: 156 - name: ratelimiter Setup 1# Project 95 2 3Traefik Rate Limit 4 5[https://gitorko.github.io/spring-boot-traefik-rate-limit/](https://gitorko.github.io/spring-boot-traefik-rate-limit/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14 15helm version --short 16v3.9.1+ga7c043a 17 18kubectl version --short 19Client Version: v1.24.3 20Kustomize Version: v4.5.4 21``` 22 23### Postgres DB 24 25``` 26docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:9.6.10 27docker ps 28docker exec -it pg-container psql -U postgres -W postgres 29CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 30CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 31grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 32 33docker stop pg-container 34docker start pg-container 35``` 36 37### Docker 38 39For docker on laptop we cant use localhost as the hostname, so add this entry to the /etc/hosts file. 40 41```bash 42127.0.0.1 localhost.com 43``` 44 45Build the project and docker image 46 47```bash 48cd project95 49./gradlew bootRun 50./gradlew clean build 51docker build -f docker/Dockerfile --force-rm -t project95:1.0.0 . 52``` 53 54If you want to deploy via docker compose. 
55 56```bash 57docker tag project95:1.0.0 gitorko/project95:1.0.0 58docker push gitorko/project95:1.0.0 59docker-compose -f docker/docker-compose.yml up 60``` 61 62### Traefik 63 64Deploy traefik via helm 65 66```bash 67helm install traefik traefik/traefik 68``` 69 70Traefik comes with the dashboard to visualize the config that is not exposed so run port forward command. If you dont need to visualize the config then you can skip this step as it is not mandatory 71 72```bash 73kubectl port-forward $(kubectl get pods --selector \u0026#34;app.kubernetes.io/name=traefik\u0026#34; --output=name) 9000:9000 74``` 75 76Open the dashboard url 77 78[http://127.0.0.1:9000/dashboard/](http://127.0.0.1:9000/dashboard/) 79 80### Kubernetes 81 82Now deploy the application on kubernetes 83 84If you want a plain deployment without traefik, This will deploy the spring boot application along with postgres, run the below command 85 86```bash 87kubectl apply -f docker/deployment.yaml 88``` 89 90To test the api, run the curl command 91 92```bash 93curl --request GET \u0026#39;http://localhost.com:8080/rest/time\u0026#39; 94``` 95 96Clean up 97 98```bash 99kubectl delete -f docker/deployment.yaml 100``` 101 102### Kubernetes \u0026amp; Traefik Ingress 103 104If you want traefik as the ingress controller, run the below command 105 106```bash 107kubectl apply -f docker/deployment-traefik.yaml 108``` 109 110To test the api, run the curl command 111 112```bash 113curl --request GET \u0026#39;http://localhost.com/rest/time\u0026#39; 114``` 115 116Clean up 117 118```bash 119kubectl delete -f docker/deployment-traefik.yaml 120``` 121 122### Kubernetes \u0026amp; Traefik IngressRoute with Rate Limit 123 124If you want traefik as the ingress \u0026amp; want to rate limit, run the below command 125 126```bash 127kubectl apply -f docker/deployment-traefik-ratelimit.yaml 128``` 129 130To test the api, run the curl command 131 132```bash 133curl --request GET 
\u0026#39;http://localhost.com/rest/time\u0026#39; 134``` 135 136Clean up 137 138```bash 139kubectl delete -f docker/deployment-traefik-ratelimit.yaml 140``` 141 142Few command to look at the services 143 144```bash 145kubectl get ingress 146kubectl describe ingress 147 148kubectl get ingressroute 149kubectl describe ingressroute 150 151kubectl get all 152 153k logs -f deployment.apps/project95 --all-containers=true 154 155helm uninstall traefik 156``` Testing Deploy the image to kubernetes\nThe dashboard will show the HTTP Routers \u0026amp; the middleware rate limit config\nYou can also look at success rate\nTo test the rate limit functionality open the RateLimit.jmx file in JMeter and run the test\nCreate a user, the data is persisted in the postgres db.\n1curl --request POST \u0026#39;http://localhost.com/rest/customer\u0026#39; \\ 2--header \u0026#39;Content-Type: application/json\u0026#39; \\ 3--data-raw \u0026#39;{ 4 \u0026#34;firstName\u0026#34; : \u0026#34;John\u0026#34;, 5 \u0026#34;lastName\u0026#34; : \u0026#34;Doe\u0026#34;, 6 \u0026#34;city\u0026#34;: \u0026#34;NY\u0026#34; 7}\u0026#39; Get the user\n1curl --request GET \u0026#39;http://localhost.com/rest/customer\u0026#39; References https://traefik.io/\n","link":"https://gitorko.github.io/post/spring-traefik-rate-limit/","section":"post","tags":["spring","spring-boot","traefik","rate-limit","kubernetes"],"title":"Spring - Traefik (Rate 
Limit)"},{"body":"","link":"https://gitorko.github.io/tags/traefik/","section":"tags","tags":null,"title":"Traefik"},{"body":"","link":"https://gitorko.github.io/categories/traefik/","section":"categories","tags":null,"title":"Traefik"},{"body":"","link":"https://gitorko.github.io/categories/clarity/","section":"categories","tags":null,"title":"Clarity"},{"body":"","link":"https://gitorko.github.io/tags/jmeter/","section":"tags","tags":null,"title":"Jmeter"},{"body":"","link":"https://gitorko.github.io/tags/redis/","section":"tags","tags":null,"title":"Redis"},{"body":"","link":"https://gitorko.github.io/categories/redis/","section":"categories","tags":null,"title":"Redis"},{"body":"","link":"https://gitorko.github.io/tags/sse/","section":"tags","tags":null,"title":"SSE"},{"body":"","link":"https://gitorko.github.io/categories/sse/","section":"categories","tags":null,"title":"SSE"},{"body":"Voting system developed with Spring Boot, Redis and Angular (Clarity) frontend.\nGithub: https://github.com/gitorko/project94\nQuick Overview To deploy the application in a single command, clone the project, make sure no conflicting docker containers or ports are running and then run\n1git clone https://github.com/gitorko/project94 2cd project94 3docker-compose -f docker/docker-compose.yml up Open http://localhost:8080/\nFeatures Users should be able to vote for candidates.\nThe same solution can be extended to the following systems\nmetering - subscription usage, consumption capping \u0026amp; pricing etc rate limiting - Counter with TTL, Token Bucket, Leaky Bucket, Sliding window counter Prevent denial of service (DoS) Traffic shaping Live visitor/user count Like and dislike count Functional Requirements An active/live voting system shows the live count of the votes as they are cast. The running count should be accurate with no race conditions. Storing of votes is not required, objective is just to track live counts. Who voted to which candidate information need not be stored. 
Only 2 candidates in the voting system, cats vs dogs. The display should show the live count of votes as they are cast without having the user refresh each time. Display must provide UI to vote for candidates, as well as support api based voting. Non-Functional Requirements Latency should be low. System should be highly available. System should scale well when number of users increases Handle concurrent request and counter value consistent. Future The design can further be modified to use write-back cache to write the running counter to the database. This way we avoid loosing the votes in case redis server goes down. Redis supports AOF (append-only file), which copies write commands to disk as they happen, and snapshotting, which takes the data as it exists at one moment in time and writes it to disk The votes can be persisted to the db by using a queuing mechanism. This will persist the who voted for whom information. We use a queue to keep the latency low. As soon as the vote counter is increased the vote object is queued and a consumer service will dequeue the request and persist to the db. Authentication and user tracking can be added. The project can be changed to spring reactor to make use of non blocking framework. Unsubscribe flow needs to be handled when browser is closed Implementation Design We will use Redis to count the votes, this will help us scale well. The counter increment needs to be atomic in nature. Redis provides this feature out of the box, where there is less contention among threads when updating atomic long. We will not persist the votes to a database as the objective is to keep an active running counter. Adding a database in the synchronous call introduces latency which prevent scaling the application. The backend and frontend bundle into a single uber jar that can be deployed on many servers there by providing ability to horizontally scale. We will use SSE (server sent events) to stream the voting results to the app. 
This way the live counter will always be displayed. We will use angular clarity for the UI Redis is an open-source (BSD licensed), in-memory data structure store, used as a database, cache, and message broker. Redis provides data structures such as strings, hashes, lists, sets, sorted sets with range queries, bitmaps, hyperloglogs, geospatial indexes, and streams. Redis has built-in data structures, atomic commands, and time-to-live (TTL) capabilities that can be used to power metering use cases. Redis runs on a single thread. Therefore, all of the database updates are serialized, enabling Redis to perform as a lock-free data store. This simplifies the application design as developers don’t need to spend any effort on synchronizing the threads or implementing locking mechanisms for data consistency. Redis stores integers as a base-10 64-bit signed integer. Therefore the maximum limit for an integer is a very large number: 263 – 1 = 9,223,372,036,854,775,807. To understand the problem with a counter on multi-thread environment refer AtomicLong vs LongAdder\nCode 1package com.demo.project94.controller; 2 3import java.util.concurrent.ExecutorService; 4import java.util.concurrent.Executors; 5import java.util.concurrent.TimeUnit; 6 7import lombok.extern.slf4j.Slf4j; 8import org.springframework.beans.factory.annotation.Autowired; 9import org.springframework.data.redis.core.RedisTemplate; 10import org.springframework.http.HttpStatus; 11import org.springframework.http.MediaType; 12import org.springframework.http.ResponseEntity; 13import org.springframework.web.bind.annotation.DeleteMapping; 14import org.springframework.web.bind.annotation.GetMapping; 15import org.springframework.web.bind.annotation.PathVariable; 16import org.springframework.web.bind.annotation.PostMapping; 17import org.springframework.web.bind.annotation.RestController; 18import org.springframework.web.servlet.mvc.method.annotation.SseEmitter; 19 20@RestController 21@Slf4j 22public class HomeController { 
23 24 @Autowired 25 private RedisTemplate\u0026lt;String, Long\u0026gt; redisTemplate; 26 27 private ExecutorService executor = Executors.newCachedThreadPool(); 28 29 @PostMapping(value = \u0026#34;/api/vote/{id}\u0026#34;) 30 public Long vote(@PathVariable String id) { 31 log.info(\u0026#34;voting for {}\u0026#34;, id); 32 return redisTemplate.opsForValue().increment(id); 33 } 34 35 @DeleteMapping(value = \u0026#34;/api/vote/{id}\u0026#34;) 36 public void resetVote(@PathVariable String id) { 37 redisTemplate.opsForValue().getAndDelete(id); 38 } 39 40 @GetMapping(value = \u0026#34;/api/votes\u0026#34;, produces = MediaType.TEXT_EVENT_STREAM_VALUE) 41 public ResponseEntity\u0026lt;SseEmitter\u0026gt; getVotes() { 42 SseEmitter emitter = new SseEmitter(15000L); 43 executor.execute(() -\u0026gt; { 44 try { 45 int id = 0; 46 while (true) { 47 SseEmitter.SseEventBuilder event = SseEmitter.event() 48 .data(\u0026#34;cat: \u0026#34; + redisTemplate.opsForValue().get(\u0026#34;cat\u0026#34;) + \u0026#34;,\u0026#34; + 49 \u0026#34;dog: \u0026#34; + redisTemplate.opsForValue().get(\u0026#34;dog\u0026#34;)) 50 .id(String.valueOf(id++)); 51 emitter.send(event); 52 TimeUnit.SECONDS.sleep(2); 53 } 54 } catch (Exception ex) { 55 emitter.completeWithError(ex); 56 } 57 }); 58 return new ResponseEntity(emitter, HttpStatus.OK); 59 } 60} 1package com.demo.project94.config; 2 3import org.springframework.context.annotation.Bean; 4import org.springframework.context.annotation.Configuration; 5import org.springframework.data.redis.connection.RedisConnectionFactory; 6import org.springframework.data.redis.core.RedisTemplate; 7import org.springframework.data.redis.serializer.StringRedisSerializer; 8 9@Configuration 10public class RedisConfiguration { 11 12 @Bean 13 public RedisTemplate\u0026lt;?, ?\u0026gt; redisTemplate(RedisConnectionFactory connectionFactory) { 14 RedisTemplate\u0026lt;?, ?\u0026gt; template = new RedisTemplate\u0026lt;\u0026gt;(); 15 
template.setConnectionFactory(connectionFactory); 16 template.setDefaultSerializer(new StringRedisSerializer()); 17 return template; 18 } 19 20} Setup 1# Project 94 2 3Voting System 4 5[https://gitorko.github.io/voting-system/](https://gitorko.github.io/voting-system/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 17.0.3 2022-04-19 LTS 14 15node --version 16v16.16.0 17 18yarn --version 191.22.18 20``` 21 22### Redis 23 24```bash 25docker run --name my-redis -p 6379:6379 -d redis redis-server --requirepass \u0026#34;password\u0026#34; 26``` 27 28### Dev 29 30To run the backend in dev mode. 31 32```bash 33./gradlew clean build 34./gradlew bootRun 35``` 36 37To Run UI in dev mode 38 39```bash 40cd ui 41yarn install 42yarn build 43yarn start 44``` 45 46Open [http://localhost:4200/](http://localhost:4200/) 47 48### Prod 49 50To run as a single jar, both UI and backend are bundled to single uber jar. 51 52```bash 53./gradlew cleanBuild 54cd build/libs 55java -jar project94-1.0.0.jar 56``` 57 58Open [http://localhost:8080/](http://localhost:8080/) 59 60### Docker 61 62```bash 63./gradlew cleanBuild 64docker build -f docker/Dockerfile --force-rm -t project94:1.0.0 . 65docker images |grep project94 66docker tag project94:1.0.0 gitorko/project94:1.0.0 67docker push gitorko/project94:1.0.0 68docker-compose -f docker/docker-compose.yml up 69``` Testing To reset the votes\n1curl --request DELETE \u0026#39;http://localhost:8080/api/vote/dog\u0026#39; 2curl --request DELETE \u0026#39;http://localhost:8080/api/vote/cat\u0026#39; To vote\n1curl --request POST \u0026#39;http://localhost:8080/api/vote/cat\u0026#39; 2curl --request POST \u0026#39;http://localhost:8080/api/vote/cat\u0026#39; JMeter Open the jmx file with Jmeter. 
Run the test that simulate a 10K concurrent votes and check the throughput.\nVoting System JMX\nReferences https://jmeter.apache.org/\nhttps://www.infoworld.com/article/3230455/how-to-use-redis-for-real-time-metering-applications.html\nhttps://www.infoworld.com/article/3230455/how-to-use-redis-for-real-time-metering-applications.html?page=2\nhttps://redis.io/\n","link":"https://gitorko.github.io/post/voting-system/","section":"post","tags":["jmeter","redis","SSE"],"title":"Voting System"},{"body":"Few puzzles in java, to test the fundamentals\nGithub: https://github.com/gitorko/project01\nPuzzles Puzzle: 1 (pass by value vs pass by ref) What is the output of the program?\n1package com.demo.basics.puzzle._001_passbyvalue; 2 3import org.junit.jupiter.api.Test; 4 5public class PassByPuzzle { 6 7 @Test 8 public void test() { 9 Employee emp = new Employee(\u0026#34;Dan\u0026#34;); 10 modify1(emp); 11 System.out.println(emp.name); 12 modify2(emp); 13 System.out.println(emp.name); 14 } 15 16 private void modify1(Employee emp) { 17 emp = new Employee(\u0026#34;John\u0026#34;); 18 } 19 20 private void modify2(Employee emp) { 21 emp.name = \u0026#34;Jack\u0026#34;; 22 } 23 24 class Employee { 25 String name; 26 27 public Employee(String name) { 28 this.name = name; 29 } 30 } 31} Solution\nJava always does pass by value. 
In the case of reference they are still passed by value but since they point to same memory location they update the same object.\nPuzzle: 2 (finally block) What is the output of the program?\n1package com.demo.basics.puzzle._002_exception; 2 3import org.junit.jupiter.api.Test; 4 5public class ExceptionPuzzle { 6 7 @Test 8 public void test() { 9 String name = getName(); 10 System.out.println(name); 11 } 12 13 private String getName() { 14 try { 15 throw new Exception(\u0026#34;ERROR\u0026#34;); 16 } finally { 17 return \u0026#34;OK\u0026#34;; 18 } 19 } 20} Solution\nThe finally block can still return a value if exception is thrown.\nPuzzle: 3 (static variable vs instance variable) What is the output of the program?\n1package com.demo.basics.puzzle._003_static; 2 3import org.junit.jupiter.api.Test; 4 5public class StaticPuzzle { 6 7 @Test 8 public void test() { 9 Employee employee1 = new Employee(\u0026#34;Dan\u0026#34;, \u0026#34;ABC\u0026#34;); 10 Employee employee2 = new Employee(\u0026#34;John\u0026#34;, \u0026#34;DEF\u0026#34;); 11 System.out.println(employee1.getName() + \u0026#34;, \u0026#34; + employee1.getCompany()); 12 System.out.println(employee2.getName() + \u0026#34;, \u0026#34; + employee2.getCompany()); 13 } 14 15} 16class Employee { 17 String name; 18 static String company; 19 20 public Employee(String name, String company) { 21 this.name = name; 22 Employee.company = company; 23 } 24 25 public String getName() { 26 return name; 27 } 28 29 public String getCompany() { 30 return company; 31 } 32 33} Solution\nThe static variables are common to all instances, so instance variables should not be static.\nPuzzle: 4 (equals \u0026amp; hashcode) What is the output of the program?\n1package com.demo.basics.puzzle._004_hashcode; 2 3import java.util.HashSet; 4import java.util.Set; 5 6import org.junit.jupiter.api.Test; 7 8public class ObjectPuzzle { 9 10 @Test 11 public void test() { 12 Set\u0026lt;Person\u0026gt; set = new HashSet\u0026lt;\u0026gt;(); 13 
Person p1 = new Person(\u0026#34;Jack\u0026#34;, 34); 14 Person p2 = new Person(\u0026#34;Jack\u0026#34;, 34); 15 set.add(p1); 16 set.add(p2); 17 System.out.println(set.size()); 18 } 19 20 class Person { 21 public String name; 22 public Integer age; 23 24 public Person(String name, Integer age) { 25 this.name = name; 26 this.age = age; 27 } 28 } 29} Solution\nHow 2 object are same is determined only if you override the equals and hashcode method. Set ensured that unique elements are stored but how does the set know that both these objects are same? That's why you need to override equals and hashcode. As a follow up you can read why hashcode and equals method need to be overridden together. What happens if you dont do them together?\nPuzzle: 5 (Immutable class) Is the class immutable?\n1package com.demo.basics.puzzle._005_immutable; 2 3import java.util.Date; 4import java.util.HashMap; 5import java.util.Map; 6 7import org.junit.jupiter.api.Test; 8 9public class ImmutablePuzzle { 10 11 @Test 12 public void test() { 13 Map\u0026lt;String, String\u0026gt; props = new HashMap\u0026lt;\u0026gt;(); 14 props.put(\u0026#34;city\u0026#34;, \u0026#34;london\u0026#34;); 15 Person p1 = new Person(\u0026#34;Jack\u0026#34;, 34, new Date(), props); 16 System.out.println(p1); 17 p1.getDob().setTime(123); 18 p1.getProps().put(\u0026#34;city\u0026#34;, \u0026#34;bangalore\u0026#34;); 19 System.out.println(p1); 20 } 21 22 final class Person { 23 private final String name; 24 private final Integer age; 25 private final Date dob; 26 private final Map\u0026lt;String, String\u0026gt; props; 27 28 public Person(String name, Integer age, Date dob, Map\u0026lt;String, String\u0026gt; props) { 29 this.name = name; 30 this.age = age; 31 this.dob = dob; 32 this.props = props; 33 } 34 35 public String getName() { 36 return name; 37 } 38 39 public Integer getAge() { 40 return age; 41 } 42 43 public Date getDob() { 44 return dob; 45 } 46 47 public Map\u0026lt;String, String\u0026gt; getProps() { 48 
return props; 49 } 50 51 @Override 52 public String toString() { 53 return \u0026#34;Person{\u0026#34; + 54 \u0026#34;name=\u0026#39;\u0026#34; + name + \u0026#39;\\\u0026#39;\u0026#39; + 55 \u0026#34;, age=\u0026#34; + age + 56 \u0026#34;, dob=\u0026#34; + dob + 57 \u0026#34;, props=\u0026#34; + props + 58 \u0026#39;}\u0026#39;; 59 } 60 } 61} Solution\nClass is not immutable, have to clone both hashmap and date to avoid modification.\nMake class final to avoid extending it. Remove setter method Make variables final so that they can be init only via constructor. Defensive copy of any variables that return reference objects. Deep copy vs shallow copy difference is important here. Reflection can still break immutability. Puzzle: 6 (string pool vs heap) What is the output of the program?\n1package com.demo.basics.puzzle._006_datatype; 2 3import org.junit.jupiter.api.Test; 4 5public class DataTypePuzzle { 6 7 @Test 8 public void test1() { 9 String name1 = \u0026#34;Jack\u0026#34;; 10 String name2 = new String(\u0026#34;Jack\u0026#34;); 11 System.out.println(name1 == name2); 12 System.out.println(name2.equals(name1)); 13 } 14 15 @Test 16 public void test2() { 17 long num1 = 5l; 18 Long num2 = Long.valueOf(5); 19 System.out.println(num1 == num2); 20 System.out.println(num2.equals(num1)); 21 } 22 23 @Test 24 public void test3() { 25 String name1 = new String(\u0026#34;Jack\u0026#34;); 26 String name2 = name1.intern(); 27 System.out.println(name1 == name2); 28 System.out.println(name2.equals(name1)); 29 } 30 31} Solution\nThe String pool holds the strings, using new String() creates the string in heap. The difference between == and .equals() where the first checks the address and the second checks the value. To move an element from heap to string pool we use intern. As a follow up to this, why we shouldn't store/use password as string in java instead we should store password as char array? Because string pool objects will remain in memory longer than heap objects. 
if you create a password in string the string pool will not GC it after the function is done. So the password can remain for a long time. If someone gets a heap dump they can look at the passwords. Hence better to store in char so that object is GC after it goes out of scope.\nPuzzle: 7 (memory leak) What is the output of the program?\n1package com.demo.basics.puzzle._007_map; 2 3import java.util.HashMap; 4import java.util.Map; 5import java.util.Objects; 6 7import org.junit.jupiter.api.Test; 8 9public class MapPuzzle { 10 11 @Test 12 public void test() { 13 Map\u0026lt;Employee, Boolean\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); 14 Employee e1 = new Employee(\u0026#34;Jack\u0026#34;, 25); 15 map.put(e1, true); 16 e1.name = \u0026#34;Rose\u0026#34;; 17 Employee e2 = new Employee(\u0026#34;John\u0026#34;, 28); 18 map.put(e2, true); 19 20 Employee john = new Employee(\u0026#34;John\u0026#34;, 28); 21 Employee jack = new Employee(\u0026#34;Rose\u0026#34;, 25); 22 Employee rose = new Employee(\u0026#34;Jack\u0026#34;, 25); 23 24 System.out.println(map.size()); 25 System.out.println(map.get(john)); 26 System.out.println(map.get(jack)); 27 System.out.println(map.get(rose)); 28 } 29 30 class Employee { 31 public String name; 32 public Integer age; 33 34 public Employee(String name, Integer age) { 35 this.name = name; 36 this.age = age; 37 } 38 39 @Override 40 public boolean equals(Object o) { 41 if (this == o) return true; 42 if (o == null || getClass() != o.getClass()) return false; 43 Employee employee = (Employee) o; 44 return name.equals(employee.name) \u0026amp;\u0026amp; age.equals(employee.age); 45 } 46 47 @Override 48 public int hashCode() { 49 return Objects.hash(name, age); 50 } 51 } 52} Solution\nJava has automatic memory management in terms of garbage collection unlike c/c++, hence ideally it means that there should be no memory leak, logic being garbage collector always identifies the objects that are not used and garbage collected. 
The puzzle above is a clear example of a memory leak in Java. The key of a Map has to be immutable, this is one of the hard requirements for a map. If you see someone using an object as key without equals/hashcode you raise a red flag. If the object is not immutable and used as a key in map you raise a red flag.\nHashmap is a array with each node of array pointing to a linkedlist. Jack was put on the hashmap table, but the value was changed to Rose. Rose hashcode will never point to the bucket of Jack. Jack hashcode will point to the bucket where Rose is present but then when it reaches that node it will again check if objects are same, in this case its not, so it will return null. So you will never be able to get jack. This is an example of memory leak in java. If it happens for many elements then jvm will crash. Garbage collector cant clean it because its still part of the map, but no one can reach it.\nPuzzle: 8 (ThreadLocal) What is the output of the program?\n1package com.demo.basics.puzzle._008_threadlocal; 2 3import java.text.ParseException; 4import java.text.SimpleDateFormat; 5import java.util.Arrays; 6import java.util.Date; 7import java.util.List; 8import java.util.concurrent.CountDownLatch; 9import java.util.concurrent.ExecutorService; 10import java.util.concurrent.Executors; 11 12import org.junit.jupiter.api.Test; 13 14public class ThreadPuzzle { 15 16 SimpleDateFormat df = new SimpleDateFormat(\u0026#34;dd/MM/yyyy\u0026#34;); 17 18 @Test 19 public void test() { 20 List\u0026lt;String\u0026gt; joinDates = Arrays.asList(\u0026#34;01/01/2015\u0026#34;, 21 \u0026#34;01/01/2016\u0026#34;, 22 \u0026#34;01/01/2017\u0026#34;, 23 \u0026#34;01/01/2018\u0026#34;, 24 \u0026#34;01/01/2019\u0026#34; 25 ); 26 CountDownLatch latch = new CountDownLatch(joinDates.size()); 27 ExecutorService executor = Executors.newCachedThreadPool(); 28 for (String doj : joinDates) { 29 executor.execute(() -\u0026gt; { 30 try { 31 Date dojDt = df.parse(doj); 32 
System.out.println(\u0026#34;Saving : \u0026#34; + dojDt); 33 } catch (ParseException e) { 34 //e.printStackTrace(); 35 } finally { 36 latch.countDown(); 37 } 38 }); 39 } 40 try { 41 latch.await(); 42 } catch (InterruptedException e) { 43 e.printStackTrace(); 44 } 45 } 46} Solution\nThe program will work sometimes and will fail sometimes. Reason is SimpleDateFormat is not thread safe. The same object is sent to all threads and even though it looks like they are using SimpleDateFormat to just parse, internally SimpleDateFormat does few operation that are not thread safe. So now you might think i will pass a new SimpleDateFormat to each thread. However this is a costly object that will increase memory. This is where ThreadLocal comes into picture, Threadlocal will keep a copy of the object specific to that thread.\nWhat is the difference between copy of SimpleDateFormat vs using new SimpleDateFormat() each time? Copy objects will be == number of threads new objects will be \u0026gt; number of threads. 
So if you have 10K dates, you will end up creating 10k new objects if you do new() With ThreadLocal you will at max have 5 SimpleDateFormat objects if the thread pool is of size 5.\nThis is the advantage of using ThreadLocal.\nPuzzle: 9 (AtomicLong vs LongAdder) What is the output of the program?\n1package com.demo.basics.puzzle._009_counter; 2 3import java.util.ArrayList; 4import java.util.List; 5import java.util.concurrent.Callable; 6import java.util.concurrent.ExecutorService; 7import java.util.concurrent.Executors; 8import java.util.concurrent.TimeUnit; 9 10import org.junit.jupiter.api.Test; 11 12public class CounterPuzzle { 13 14 @Test 15 public void test() throws InterruptedException { 16 Job job = new Job(); 17 job.runJob(); 18 System.out.println(job.counter); 19 } 20 21 class Job { 22 long counter = 0l; 23 24 public void runJob() throws InterruptedException { 25 ExecutorService executor = Executors.newCachedThreadPool(); 26 List\u0026lt;Callable\u0026lt;Void\u0026gt;\u0026gt; tasks = new ArrayList\u0026lt;\u0026gt;(); 27 for (int i = 0; i \u0026lt; 250; i++) { 28 tasks.add(() -\u0026gt; { 29 counter = counter + 1; 30 return null; 31 }); 32 } 33 executor.invokeAll(tasks, 5, TimeUnit.SECONDS); 34 executor.shutdown(); 35 } 36 } 37} Solution\nThere are 2 problem in the code above. Each thread is updating counter without syncronization block/lock, so what value the thread read and what value it wrote back is not guaranteed. Your first thought might be to make the variable volatile so that copy of counter is kept in main memory and not thread memory. However if every thread is reading and writing at different times even volatile wont help. So you might think of putting a syncronization block but that will impact performance. So using a AtomicLong is a good approach here. It guarantees that the CAS (Compare and Swap) operation is atomic and hence counter will be correct. There is another hidden problem that is long + 1 is not a single operation. 
integer + 1 is a single operation, but long + 1 is not a single operation even within jvm. So this can lead to race condition. Long + 1 takes 2 operation in JVM to add. The AtomicLong solves the problem but again is not optimal as it can lead to contention when there are lot of requests. This is when you use LongAdder which also guarantees count is 250 but the way it does it is different. LongAdder maintains an array of counters, each thread updates a different element in the array with the count +1 and when you finally call sum, it adds the array. This means there is no contention because each thread is writing to a different block.\nPuzzle: 10 (volatile vs AtomicBoolean) What is the output of the program?\n1package com.demo.basics.puzzle._010_volatile; 2 3import java.time.Duration; 4import java.util.concurrent.TimeUnit; 5 6import org.junit.jupiter.api.Assertions; 7import org.junit.jupiter.api.Test; 8 9public class VolatilePuzzle { 10 11 private boolean sayHello = false; 12 13 @Test 14 public void test() { 15 Assertions.assertTimeoutPreemptively(Duration.ofSeconds(3), () -\u0026gt; { 16 //sayHello(); 17 }); 18 } 19 20 public void sayHello() throws InterruptedException { 21 Thread thread = new Thread(() -\u0026gt; { 22 while (!sayHello) { 23 } 24 System.out.println(\u0026#34;Hello World!\u0026#34;); 25 while (sayHello) { 26 } 27 System.out.println(\u0026#34;Good Bye!\u0026#34;); 28 }); 29 thread.start(); 30 TimeUnit.SECONDS.sleep(1); 31 sayHello = true; 32 TimeUnit.SECONDS.sleep(1); 33 sayHello = false; 34 thread.join(); 35 } 36} Solution\nThe puzzle explains the concept of thread memory cache. When you call a thread it creates a thread stack that maintains a copy of the global variable. If the variable changes within the thread the changes are flushed, however if the variable changes outside the changes will not be synchronised immediately. Thread can continue to use the local copy of the variable not knowing that it has already changed. 
In the puzzle above even though we are change the boolean, the thread doesnt know the boolean changed in the main thread so it continues to refer to its local copy in cache. How to prevent the thread from maintaining a local copy of the variable cache and always refer to global variable? You can use volatile or AtomicBoolean\nPuzzle: 11 (instance lock vs class lock) What is the output of the program?\n1package com.demo.basics.puzzle._011_instanceclasslock; 2 3import java.util.concurrent.ExecutorService; 4import java.util.concurrent.Executors; 5import java.util.concurrent.TimeUnit; 6 7import org.junit.jupiter.api.Test; 8 9public class InstanceClassLockPuzzle { 10 11 @Test 12 public void test() throws InterruptedException { 13 Greet greet = new Greet(); 14 ExecutorService executor = Executors.newCachedThreadPool(); 15 executor.execute(() -\u0026gt; { 16 greet.task1(); 17 }); 18 TimeUnit.SECONDS.sleep(2); 19 executor.execute(() -\u0026gt; { 20 greet.task2(); 21 }); 22 executor.execute(() -\u0026gt; { 23 greet.task2(); 24 }); 25 executor.shutdown(); 26 executor.awaitTermination(10, TimeUnit.SECONDS); 27 } 28 29 class Greet { 30 31 public void task1() { 32 synchronized (Greet.class) { 33 System.out.println(\u0026#34;task1 class lock acquired!\u0026#34;); 34 while (true) ; 35 } 36 } 37 38 public void task2() { 39 synchronized (this) { 40 System.out.println(Thread.currentThread().getName() + \u0026#34; task2 instance lock acquired!\u0026#34;); 41 try { 42 TimeUnit.SECONDS.sleep(2); 43 } catch (InterruptedException e) { 44 e.printStackTrace(); 45 } 46 System.out.println(Thread.currentThread().getName() + \u0026#34; task2 completed\u0026#34;); 47 } 48 } 49 } 50} 51 52 Solution\nThere are 2 types of locks instance lock and class lock.\nPuzzle: 13 (Double check locking) What is the problem with this program?\n1package com.demo.basics.puzzle._013_doublechecklock; 2 3import org.junit.jupiter.api.Test; 4 5public class CheckLockingPuzzle { 6 7 @Test 8 public void test() { 9 
CheckLockingPuzzle.getInstance().greet(); 10 } 11 12 private static volatile CheckLockingPuzzle instance; 13 14 private CheckLockingPuzzle() { 15 } 16 17 private static CheckLockingPuzzle getInstance() { 18 synchronized (CheckLockingPuzzle.class) { 19 if (instance == null) { 20 instance = new CheckLockingPuzzle(); 21 } 22 } 23 return instance; 24 } 25 26 public void greet() { 27 System.out.println(\u0026#34;Hello World!\u0026#34;); 28 } 29} Solution\nIntroduces the concept of double check locking, where each thread accessing the syncronized block is a costly operation, hence doing a null check before and after the syncronization improves performance.\nPuzzle: 14 (Race Condition) What is the problem with this program?\n1package com.demo.basics.puzzle._014_racecondition; 2 3import java.util.ArrayList; 4import java.util.HashMap; 5import java.util.List; 6import java.util.Map; 7import java.util.concurrent.Callable; 8import java.util.concurrent.ExecutorService; 9import java.util.concurrent.Executors; 10import java.util.concurrent.TimeUnit; 11 12import org.junit.jupiter.api.Test; 13 14public class RacePuzzle { 15 16 Map\u0026lt;String, String\u0026gt; bookMap = new HashMap\u0026lt;\u0026gt;(); 17 18 @Test 19 public void test() throws InterruptedException { 20 ExecutorService executor = Executors.newCachedThreadPool(); 21 List\u0026lt;Callable\u0026lt;Void\u0026gt;\u0026gt; tasks = new ArrayList\u0026lt;\u0026gt;(); 22 tasks.add(() -\u0026gt; { 23 if (!bookMap.containsKey(\u0026#34;book1\u0026#34;)) { 24 bookMap.put(\u0026#34;book1\u0026#34;, \u0026#34;user3\u0026#34;); 25 } 26 return null; 27 }); 28 tasks.add(() -\u0026gt; { 29 if (!bookMap.containsKey(\u0026#34;book1\u0026#34;)) { 30 bookMap.put(\u0026#34;book1\u0026#34;, \u0026#34;user5\u0026#34;); 31 } 32 return null; 33 }); 34 executor.invokeAll(tasks, 5, TimeUnit.SECONDS); 35 System.out.println(bookMap.get(\u0026#34;book1\u0026#34;)); 36 } 37} Solution\nRace condition, two threads can try to update at the same time 
leading to data corruption. Using the atomic putIfAbsent should fix it.\nPuzzle: 15 (String pool) What is the output of this program?\n1package com.demo.basics.puzzle._016_string_pool; 2 3import org.junit.jupiter.api.Test; 4 5public class StringPoolPuzzle { 6 7 @Test 8 public void test() throws InterruptedException { 9 String str1 = new String(\u0026#34;Hello World\u0026#34;); 10 String str2 = new String(\u0026#34;Hello World\u0026#34;); 11 System.out.println(str1 == str2); 12 13 String str3 = \u0026#34;Hello World\u0026#34;; 14 String str4 = \u0026#34;Hello World\u0026#34;; 15 System.out.println(str3 == str4); 16 17 String str5 = str1.intern(); 18 String str6 = str2.intern(); 19 System.out.println(str5 == str6); 20 21 System.out.println(str5 == str3); 22 } 23} String class is immutable. When we create string with new String() separate memory is allocated on heap for each string literal. Thus, two new string objects are created in the memory i.e. str1 and str2. that holds different references When we create String without new String() it gets created in string pool which holds same reference. Then intern() method moves the string from heap to string pool. 
If the same string already exists on string pool then the reference is returned.\n","link":"https://gitorko.github.io/post/java-puzzles/","section":"post","tags":["java-puzzles"],"title":"Java Puzzles"},{"body":"","link":"https://gitorko.github.io/tags/java-puzzles/","section":"tags","tags":null,"title":"Java-Puzzles"},{"body":"","link":"https://gitorko.github.io/categories/puzzles/","section":"categories","tags":null,"title":"Puzzles"},{"body":"","link":"https://gitorko.github.io/tags/docker/","section":"tags","tags":null,"title":"Docker"},{"body":"","link":"https://gitorko.github.io/categories/docker/","section":"categories","tags":null,"title":"Docker"},{"body":"","link":"https://gitorko.github.io/tags/helm/","section":"tags","tags":null,"title":"Helm"},{"body":"","link":"https://gitorko.github.io/categories/helm/","section":"categories","tags":null,"title":"Helm"},{"body":"","link":"https://gitorko.github.io/categories/jenkins/","section":"categories","tags":null,"title":"Jenkins"},{"body":"","link":"https://gitorko.github.io/tags/jib/","section":"tags","tags":null,"title":"Jib"},{"body":"Spring Boot development with docker \u0026amp; kubernetes.\nGithub: https://github.com/gitorko/project61\nKubernetes Rancher Desktop Rancher Desktop allows you to run Kubernetes on your local machine. Its free and open-source.\nDisable Traefik, select dockerd as container in the settings.\nIf you get the below error when you run kubectl, its mostly due to .kubeconfig file already present from docker desktop installation.\n1I0804 20:09:34.857149 37711 versioner.go:58] Get \u0026#34;https://kubernetes.docker.internal:6443/version?timeout=5s\u0026#34;: x509: certificate signed by unknown authority 2Unable to connect to the server: x509: certificate signed by unknown authority Delete the .kube folder and restart Rancher Desktop.\n1rm -rf ~/.kube Docker Desktop Docker Desktop allows you to run Kubernetes on your local machine. 
Do refer the latest licensing terms as they have changed.\nOnce kubernetes is running, check kubectl.\n1export KUBECONFIG=~/.kube/config 2kubectl version Kubernetes Dashboard If you want to visualize the kubernetes infra, you can install the dashboard UI.\nhttps://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/\n1kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended.yaml 2kubectl proxy Open the dashboard url in a browser\nhttp://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/\nTo get the token to login run the below command\n1kubectl -n kube-system describe secret default|grep -i \u0026#39;token:\u0026#39;|awk \u0026#39;{print $2}\u0026#39; 2kubectl config set-credentials docker-for-desktop --token=\u0026#34;${TOKEN}\u0026#34; Now provide the token and login.\nClean up\n1kubectl --namespace kube-system get all 2kubectl delete -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended.yaml Build \u0026amp; Deployment Build the project\n1git clone https://github.com/gitorko/project61.git 2cd project61 3./gradlew clean build Docker There are 2 ways you can build the docker image, either run the docker build command or use the google jib library.\nTo build via docker build command\n1docker build -f k8s/Dockerfile --force-rm -t project61:1.0.0 . 2docker images | grep project61 To build via jib plugin run the below command. This way building the docker image can be part of the build process\n1./gradlew jibDockerBuild Test if the docker image is working\n1docker rm project61 2docker run -p 9090:9090 --name project61 project61:1.0.0 http://localhost:9090/api/time\nDaemon mode\n1docker run -d -p 9090:9090 --name project61 project61:1.0.0 2docker image prune Kubernetes Basics Now let's deploy the project on a kubernetes cluster. 
Check if kubernetes commands work\n1kubectl version 2kubectl config get-contexts 3kubectl config use-context docker-desktop 4kubectl config set-context --current --namespace=default 5kubectl get nodes 6kubectl get ns 7kubectl get all 8kubectl cluster-info We will now deploy just the docker image in kubernetes without needing any yaml files and using port forwarding access the api. Very rarely you will need to do this as most k8s deployment is done via yaml.\n1kubectl run project61-k8s --image project61:1.0.0 --image-pull-policy=Never --port=9090 2kubectl port-forward project61-k8s 9090:9090 http://localhost:9090/api/time\nYou can also create a service and access the pod. Get the port from the NodePort. Again this is to understand the fundamentals, a yaml file will be used later.\n1kubectl expose pod project61-k8s --type=NodePort 2kubectl get -o jsonpath=\u0026#34;{.spec.ports[0].nodePort}\u0026#34; services project61-k8s Change the port that you got in the last command and test this api: http://localhost:/api/time\nCheck the pods,services \u0026amp; deployments.\n1kubectl get all You can access the bash terminal of the pod\n1kubectl get pods 2kubectl exec -it project61-k8s -- /bin/bash 3ls Clean up.\n1kubectl delete pod project61-k8s 2kubectl delete service project61-k8s 3kubectl get all Kubernetes Yaml Now we will deploy via the kubernetes yaml file.\n1kubectl apply -f k8s/Deployment.yaml --dry-run=client --validate=true 2kubectl apply -f k8s/Deployment.yaml http://localhost:9090/api/time\nScale the deployment\n1kubectl scale deployment project61-k8s --replicas=3 Look at the logs\n1kubectl logs -f deployment/project61-k8s --all-containers=true --since=10m Clean up\n1kubectl delete -f k8s/Deployment.yaml Helm Now lets deploy the same project via helm charts\n1brew install helm 1helm version 2helm install project61 mychart 3helm list 4kubectl get pod,svc,deployment Get the url and invoke the api\n1curl http://$(kubectl get svc/project61-k8s -o 
jsonpath=\u0026#39;{.status.loadBalancer.ingress[0].hostname}\u0026#39;):9090/api/time 1http://localhost:9090/api/time Clean up\n1helm uninstall project61 Debugging To attach a debugger to the application follow the below steps\nDocker Debug To debug the docker image start the pod with the debug port on 5005 enabled.\n1docker stop project61 2docker rm project61 3docker run -p 9090:9090 -p 5005:5005 --name project61 project61:1.0.0 Enable remote JVM debug in intellij\n1-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005 http://localhost:9090/api/time\nNow when you request the api, the debug breakpoint in intellij is hit.\nKubernetes Debug To debug the kubernetes pod start port forwarding to the port 5005\n1kubectl get pod 2kubectl port-forward pod/\u0026lt;POD_NAME\u0026gt; 5005:5005 Now when you request the api, the debug breakpoint in intellij is hit.\nTelepresence To debug the kubernetes pod you can use telepresence. It will swap the prod running on kubernetes with a proxy pod that redirects traffic to your local setup.\nInstall telepresence\n1sudo curl -fL https://app.getambassador.io/download/tel2/darwin/amd64/latest/telepresence -o /usr/local/bin/telepresence 2sudo chmod a+x /usr/local/bin/telepresence Start the project61 application in debug mode in intellij, change the port to 9095 in application yaml, as we will be testing debugging locally.\nRun the telepresence command, that will swap the kubernetes pod with a proxy pod and redirect all requests on 9090 to 9095.\n1telepresence --namespace=default --swap-deployment project61-k8s --expose 9095:9090 --run-shell 2kubectl get pods Note that port here is 9090 that is the kubernetes port for incoming requests. 
Telepresence will redirect these to 9095 port where your local instance is running.\nhttp://localhost:9090/api/time\nNow when you request the api, the debug breakpoint in intellij is hit.\nJVM Monitoring To hook jConsole or VisualVM\nDocker To connect to JMX port, start docker image with port 9095 exposed. The docker image already has the settings to enable JMX.\n1docker stop project61 2docker rm project61 3docker run -p 9090:9090 -p 9095:9095 --name project61 project61:1.0.0 Kubernetes To connect to JMX port, start port forwarding, The docker image already has the settings to enable JMX.\n1kubectl get pod 2kubectl port-forward pod/\u0026lt;POD_NAME\u0026gt; 9095:9095 VisualVM Connect to the port\n1http://localhost:9095 JConsole 1jconsole 127.0.0.1:9095 Jenkins CI/CD Fork the github project61 repo so you have your own github project to push the code \u0026amp; clone it.\nhttps://github.com/gitorko/project61\nDownload the jenkins war file and run the below command.\nhttps://www.jenkins.io/\n1java -jar jenkins.war --httpPort=\u0026#39;8088\u0026#39; http://localhost:8088/\nFollow the default steps to install plugin and configure jenkins. The default password is printed in the console log.\nGoto Global Tool Configuration and add Java 17, Maven\nAdd the kubernetes config as a credential\n1/Users/$USER/.kube/config Install the kubernetes CLI plugin\nhttps://plugins.jenkins.io/kubernetes-cli/\nThen create a pipeline item and copy the content of Jenkinsfile, Enter the GitHub url of your forked project. 
Save and run the job.\n1pipeline { 2 agent any 3 4 tools { 5 jdk \u0026#34;jdk-17\u0026#34; 6 maven \u0026#34;maven3\u0026#34; 7 } 8 9 stages { 10 stage(\u0026#39;Checkout\u0026#39;) { 11 steps { 12 //TODO: Change to forked repo 13 git url: \u0026#39;https://github.com/gitorko/project61\u0026#39;, branch: \u0026#39;master\u0026#39; 14 } 15 } 16 stage(\u0026#39;Build\u0026#39;) { 17 steps { 18 sh \u0026#34;./gradlew clean build\u0026#34; 19 } 20 post { 21 // record the test results and archive the jar file. 22 success { 23 junit \u0026#39;build/test-results/test/TEST-*.xml\u0026#39; 24 archiveArtifacts \u0026#39;build/libs/*.jar\u0026#39; 25 } 26 } 27 } 28 stage(\u0026#39;Build Docker Image\u0026#39;) { 29 steps { 30 sh \u0026#34;./gradlew jibDockerBuild -Djib.to.tags=$BUILD_NUMBER\u0026#34; 31 } 32 post { 33 // record the test results and archive the jar file. 34 success { 35 junit \u0026#39;build/test-results/test/TEST-*.xml\u0026#39; 36 archiveArtifacts \u0026#39;build/libs/*.jar\u0026#39; 37 } 38 } 39 } 40 stage (\u0026#39;Push Docker Image\u0026#39;) { 41 steps { 42 //TODO: docker hub push 43 echo \u0026#34;Pushing docker image\u0026#34; 44 } 45 } 46 stage(\u0026#39;Deploy\u0026#39;) { 47 steps { 48 withKubeConfig([credentialsId: \u0026#39;kubernetes-config\u0026#39;]) { 49 sh \u0026#39;\u0026#39;\u0026#39; 50cat \u0026lt;\u0026lt;EOF | kubectl apply -f - 51apiVersion: apps/v1 52kind: Deployment 53metadata: 54 name: project61-k8s 55spec: 56 selector: 57 matchLabels: 58 app: project61-k8s 59 strategy: 60 rollingUpdate: 61 maxSurge: 1 62 maxUnavailable: 1 63 type: RollingUpdate 64 replicas: 1 65 template: 66 metadata: 67 labels: 68 app: project61-k8s 69 spec: 70 containers: 71 - name: project61 72 image: project61:$BUILD_NUMBER 73 imagePullPolicy: IfNotPresent 74 ports: 75 - containerPort: 9090 76 resources: 77 limits: 78 cpu: \u0026#34;1\u0026#34; 79 memory: \u0026#34;500Mi\u0026#34; 80--- 81kind: Service 82apiVersion: v1 83metadata: 84 name: project61-k8s 
85spec: 86 ports: 87 - port: 9090 88 targetPort: 9090 89 name: http 90 selector: 91 app: project61-k8s 92 type: LoadBalancer 93 \u0026#39;\u0026#39;\u0026#39; 94 } 95 } 96 } 97 } 98} Each jenkins job run creates a docker image version by build number, kubectl terminates the old pod and starts the new pod.\n1docker images |grep project61 2kubectl get pods -w Clean up the docker images as they consume space.\n1docker rmi project61:1 2kubectl delete -f k8s/Deployment.yaml You can configure a 'GitHub hook trigger for GITScm polling' to deploy when a commit is pushed to github.\nResources Kubernetes Samples Build a custom nginx image\n1docker build -f k8s-manifest/Dockerfile1 --force-rm -t my-nginx:1 . 2docker build -f k8s-manifest/Dockerfile2 --force-rm -t my-nginx:2 . Create an alias for kubectl as k\n1alias k=\u0026#34;kubectl\u0026#34; https://kubernetes.io/docs/reference/kubectl/cheatsheet/\n01. Create a simple pod 1k apply -f k8s-manifest/01-create-pod.yaml 2k get all 3k delete -f k8s-manifest/01-create-pod.yaml 4k logs pod/counter 5k describe pod/counter 02. Create ngnix pod, use port forward to access Create nginx pod and enter pods bash prompt\n1k apply -f k8s-manifest/02-nginx-pod.yaml 2k get all 3k port-forward pod/nginx 8080:80 4k exec -it pod/nginx -- /bin/sh 5k delete -f k8s-manifest/02-nginx-pod.yaml http://localhost:8080/\n03. Create ngnix pod with html updated by another container in same pod 1k apply -f k8s-manifest/03-nginx-pod-volume.yaml 2k get pods -w 3kubectl get -o jsonpath=\u0026#34;{.spec.ports[0].nodePort}\u0026#34; service/nginx-service 4k delete -f k8s-manifest/03-nginx-pod-volume.yaml http://localhost:31000/\n04. Create job Run once and stop. output is kept till you delete it.\n1k apply -f k8s-manifest/04-job.yaml 2k get all 3k delete -f k8s-manifest/04-job.yaml 05. 
Liveness probe Liveness probe determines when pod is healthy, here file is deleted after 30 seconds causing pod to restart\n1k apply -f k8s-manifest/05-liveness-probe.yaml 2k get pods -w 3k delete -f k8s-manifest/05-liveness-probe.yaml 06. Readiness probe Readiness probe determines when to send traffic\n1k apply -f k8s-manifest/06-readiness-probe.yaml 2k port-forward pod/nginx 8080:80 3k delete -f k8s-manifest/06-readiness-probe.yaml http://localhost:8080/\n07. Cron Job Cron job runs every minute\n1k apply -f k8s-manifest/07-cron-job.yaml 2k get job.batch -w 3k delete -f k8s-manifest/07-cron-job.yaml 08. Config Configure configMap and secrets.\n1k apply -f k8s-manifest/08-config.yaml 2k logs pod/busybox 3k delete -f k8s-manifest/08-config.yaml config map as volume\n1k apply -f k8s-manifest/08-config-volume.yaml 2k logs -f pod/busybox 3k edit configmap app-setting 4k get configmap app-setting -o yaml 5k exec -it pod/busybox -- /bin/sh 6 7k delete -f k8s-manifest/08-config-volume.yaml 09. Deployment with Load Balancer 1k apply -f k8s-manifest/09-deployment.yaml 2k get all 3k port-forward service/nginx-service 8080:8080 4 5k scale deployment.apps/nginx --replicas=0 6k scale deployment.apps/nginx --replicas=3 7 8k delete -f k8s-manifest/09-deployment.yaml http://localhost:8080/\n10. External service Proxies to external name\n1k apply -f k8s-manifest/10-external-service.yaml 2k get services 3k delete -f k8s-manifest/10-external-service.yaml 11. Host Path Volume 1k apply -f k8s-manifest/11-volume-host-path.yaml 2k get all 3k delete -f k8s-manifest/11-volume-host-path.yaml http://localhost:31000/\n12. Persistent Volume \u0026amp; Persistent Volume Claim 1k apply -f k8s-manifest/12-pesistent-volume.yaml 2k get pv 3k get pvc 4k get all 5k delete -f k8s-manifest/12-pesistent-volume.yaml http://localhost:31000/\n14. 
Blue Green Deployment 1k apply -f k8s-manifest/14-deployment-blue-green.yaml 2k apply -f k8s-manifest/14-deployment-blue-green-flip.yaml 3 4k delete service/nginx-blue 5k delete deployment/nginx-v1 6 7k delete -f k8s-manifest/14-deployment-blue-green.yaml http://localhost:31000/ http://localhost:31000/\n15. Canary Deployment 1k apply -f k8s-manifest/15-deployment-canary.yaml 2 3k delete deployment/nginx-v2 4 5k delete -f k8s-manifest/15-deployment-canary.yaml 1while true; do curl http://localhost:31000/; sleep 2; done http://localhost:31000/\nReferences https://github.com/GoogleContainerTools/jib\nhttps://www.docker.com/products/docker-desktop/\nhttps://rancherdesktop.io/\nhttps://birthday.play-with-docker.com/kubernetes-docker-desktop/\nhttps://helm.sh/\nhttps://www.getambassador.io/docs/telepresence/latest/quick-start/qs-java/\nhttps://visualvm.github.io/\nhttps://www.eclipse.org/mat/\nhttps://www.jenkins.io/\nhttps://plugins.jenkins.io/kubernetes/\n","link":"https://gitorko.github.io/post/kubernetes-basics/","section":"post","tags":["docker","kubernetes","helm","telepresence","rancher","jib"],"title":"Kubernetes - Basics"},{"body":"","link":"https://gitorko.github.io/tags/rancher/","section":"tags","tags":null,"title":"Rancher"},{"body":"","link":"https://gitorko.github.io/tags/telepresence/","section":"tags","tags":null,"title":"Telepresence"},{"body":"","link":"https://gitorko.github.io/tags/bootstrap/","section":"tags","tags":null,"title":"Bootstrap"},{"body":"","link":"https://gitorko.github.io/tags/google-chart/","section":"tags","tags":null,"title":"Google-Chart"},{"body":"","link":"https://gitorko.github.io/tags/jwt/","section":"tags","tags":null,"title":"Jwt"},{"body":"","link":"https://gitorko.github.io/tags/reactjs/","section":"tags","tags":null,"title":"Reactjs"},{"body":"","link":"https://gitorko.github.io/categories/reactjs/","section":"categories","tags":null,"title":"ReactJS"},{"body":"Spring Boot web application with reactjs and JWT 
authentication support, uses bootstrap and google chart. Creates uber jar to deploy.\nGithub: https://github.com/gitorko/project89\nQuick Overview To deploy the application in a single command, clone the project, make sure no conflicting docker containers or ports are running and then run\n1git clone https://github.com/gitorko/project89 2cd project89 3docker-compose -f docker/docker-compose.yml up Open http://localhost:8080/\nFeatures A Spring Web application with reactjs. Supports JWT authentication and provides login \u0026amp; logout features. Uses Spring Data to persist data into the postgres db. Spring dev tools allow seamless reload on any changes for java files.\nReactJS app supports basic JWT authentication Bootstrap 5 CRUD UI for adding and removing customer to db. Charts for bar,pie,stack charts with data from rest api Implementation Design Code To allow spring dev tools to reload on change you need to enable 'Update classes and resources' in Intellij as shown below\n1package com.demo.project89.controller; 2 3import java.util.Date; 4 5import com.demo.project89.domain.Customer; 6import com.demo.project89.repo.CustomerRepository; 7import lombok.RequiredArgsConstructor; 8import lombok.extern.slf4j.Slf4j; 9import org.springframework.security.access.prepost.PreAuthorize; 10import org.springframework.web.bind.annotation.DeleteMapping; 11import org.springframework.web.bind.annotation.GetMapping; 12import org.springframework.web.bind.annotation.PathVariable; 13import org.springframework.web.bind.annotation.PostMapping; 14import org.springframework.web.bind.annotation.RequestBody; 15import org.springframework.web.bind.annotation.RestController; 16 17@RestController 18@Slf4j 19@RequiredArgsConstructor 20public class HomeController { 21 22 final CustomerRepository customerRepo; 23 24 @GetMapping(value = \u0026#34;/api/time\u0026#34;) 25 public Date serverTime() { 26 log.info(\u0026#34;Getting server time!\u0026#34;); 27 return new Date(); 28 } 29 30 
@GetMapping(value = \u0026#34;/api/customer\u0026#34;) 31 @PreAuthorize(\u0026#34;hasRole(\u0026#39;ROLE_USER\u0026#39;) or hasRole(\u0026#39;ROLE_ADMIN\u0026#39;)\u0026#34;) 32 public Iterable\u0026lt;Customer\u0026gt; getCustomers() { 33 return customerRepo.findAll(); 34 } 35 36 @PreAuthorize(\u0026#34;hasRole(\u0026#39;ROLE_ADMIN\u0026#39;)\u0026#34;) 37 @PostMapping(value = \u0026#34;/api/customer\u0026#34;) 38 public Customer saveCustomer(@RequestBody Customer customer) { 39 log.info(\u0026#34;Saving customer!\u0026#34;); 40 return customerRepo.save(customer); 41 } 42 43 @PreAuthorize(\u0026#34;hasRole(\u0026#39;ROLE_ADMIN\u0026#39;)\u0026#34;) 44 @DeleteMapping(value = \u0026#34;/api/customer/{id}\u0026#34;) 45 public void deleteCustomer(@PathVariable Long id) { 46 log.info(\u0026#34;Deleting customer: {}\u0026#34;, id); 47 customerRepo.deleteById(id); 48 } 49 50} Spring security is configured for JWT authentication.\n1package com.demo.project89.security; 2 3import com.demo.project89.service.UserDetailsServiceImpl; 4import lombok.RequiredArgsConstructor; 5import org.springframework.context.annotation.Bean; 6import org.springframework.context.annotation.Configuration; 7import org.springframework.security.authentication.AuthenticationManager; 8import org.springframework.security.authentication.AuthenticationProvider; 9import org.springframework.security.authentication.dao.DaoAuthenticationProvider; 10import org.springframework.security.config.annotation.authentication.configuration.AuthenticationConfiguration; 11import org.springframework.security.config.annotation.method.configuration.EnableGlobalMethodSecurity; 12import org.springframework.security.config.annotation.web.builders.HttpSecurity; 13import org.springframework.security.config.annotation.web.configuration.EnableWebSecurity; 14import org.springframework.security.config.http.SessionCreationPolicy; 15import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder; 16import 
org.springframework.security.crypto.password.PasswordEncoder; 17import org.springframework.security.web.SecurityFilterChain; 18import org.springframework.security.web.authentication.UsernamePasswordAuthenticationFilter; 19 20@Configuration 21@EnableGlobalMethodSecurity(prePostEnabled = true) 22@RequiredArgsConstructor 23@EnableWebSecurity 24public class SecurityConfig { 25 26 public static final String USER_ROLE = \u0026#34;ADMIN\u0026#34;; 27 public static final String USER_NAME = \u0026#34;admin\u0026#34;; 28 public static final String USER_PASSWORD = \u0026#34;admin@123\u0026#34;; 29 final UserDetailsServiceImpl userDetailsService; 30 final JwtAuthEntryPoint authenticationEntryPoint; 31 32 @Bean 33 public PasswordEncoder passwordEncoder() { 34 return new BCryptPasswordEncoder(); 35 } 36 37 @Bean 38 public AuthenticationManager authenticationManager(AuthenticationConfiguration authenticationConfiguration) throws Exception { 39 return authenticationConfiguration.getAuthenticationManager(); 40 } 41 42 @Bean 43 public JwtTokenFilter jwtTokenFilter() { 44 return new JwtTokenFilter(); 45 } 46 47 @Bean 48 SecurityFilterChain securityFilterChain(HttpSecurity http) throws Exception { 49 http 50 .csrf(csrf -\u0026gt; csrf.disable()) 51 .exceptionHandling(e -\u0026gt; e.authenticationEntryPoint(authenticationEntryPoint)) 52 .sessionManagement(s -\u0026gt; s.sessionCreationPolicy(SessionCreationPolicy.STATELESS)) 53 .authorizeHttpRequests(authorize -\u0026gt; authorize 54 .requestMatchers(\u0026#34;/api/auth/**\u0026#34;).permitAll() 55 .requestMatchers(\u0026#34;/api/time\u0026#34;).permitAll() 56 .requestMatchers(\u0026#34;/api/**\u0026#34;).authenticated() 57 .anyRequest().permitAll() 58 ); 59 http.addFilterBefore(jwtTokenFilter(), UsernamePasswordAuthenticationFilter.class); 60 return http.build(); 61 } 62 63 @Bean 64 public AuthenticationProvider authenticationProvider() { 65 DaoAuthenticationProvider authProvider = new DaoAuthenticationProvider(); 66 
authProvider.setUserDetailsService(userDetailsService); 67 authProvider.setPasswordEncoder(passwordEncoder()); 68 return authProvider; 69 } 70 71} 1import MenuBar from \u0026#34;../components/MenuBar\u0026#34; 2import PropTypes from \u0026#39;prop-types\u0026#39; 3import {useEffect, useState} from \u0026#39;react\u0026#39; 4import {Alert, Button, Col, Container, Form, Row, Table} from \u0026#34;react-bootstrap\u0026#34; 5import {Trash} from \u0026#39;react-bootstrap-icons\u0026#39; 6import RestService from \u0026#34;../services/RestService\u0026#34; 7import AuthService from \u0026#34;../services/AuthService\u0026#34; 8import {useNavigate} from \u0026#34;react-router-dom\u0026#34;; 9 10function Home() { 11 12 let navigate = useNavigate(); 13 const [customers, setCustomers] = useState([]) 14 const [time, setTime] = useState() 15 const [customer, setCustomer] = useState({ 16 firstName: \u0026#39;\u0026#39;, 17 lastName: \u0026#39;\u0026#39; 18 }) 19 const [flashMsg, setFlashMsg] = useState({ 20 success: \u0026#39;\u0026#39;, 21 error: \u0026#39;\u0026#39; 22 }) 23 24 const getCustomers = async () =\u0026gt; { 25 const customersFromServer = await RestService.fetchCustomers() 26 setCustomers(customersFromServer) 27 } 28 29 const deleteCustomer = async (id: any) =\u0026gt; { 30 RestService.deleteCustomer(id).then((res) =\u0026gt; { 31 if (res) { 32 setCustomers(customers.filter((customer) =\u0026gt; { 33 // @ts-ignore 34 return customer.id !== id; 35 })) 36 setFlashMsg({ 37 ...flashMsg, 38 \u0026#39;success\u0026#39;: \u0026#39;Deleted user: \u0026#39; + id 39 }) 40 } else { 41 alert(\u0026#39;Error in delete!\u0026#39;) 42 } 43 }) 44 } 45 46 const onSubmit = (e: any) =\u0026gt; { 47 e.preventDefault() 48 if (!customer.firstName || !customer.lastName) { 49 alert(\u0026#39;Please enter the values\u0026#39;) 50 return 51 } 52 addCustomer(customer) 53 setCustomer({ 54 firstName: \u0026#39;\u0026#39;, 55 lastName: \u0026#39;\u0026#39; 56 }) 57 setFlashMsg({ 58 ...flashMsg, 
59 \u0026#39;success\u0026#39;: \u0026#39;Successfully added user by: \u0026#39; + AuthService.getUser() 60 }) 61 } 62 63 const addCustomer = async (customer: any) =\u0026gt; { 64 RestService.addCustomer(customer).then((data) =\u0026gt; { 65 // @ts-ignore 66 setCustomers([...customers, data]) 67 }) 68 } 69 70 const handleChange = (e: any) =\u0026gt; { 71 setCustomer({ 72 ...customer, 73 [e.target.name]: e.target.value 74 }); 75 } 76 77 useEffect(() =\u0026gt; { 78 if (!AuthService.isAuthenticated()) { 79 navigate(\u0026#39;/login\u0026#39;); 80 return 81 } 82 setFlashMsg({ 83 success: \u0026#39;\u0026#39;, 84 error: \u0026#39;\u0026#39; 85 }) 86 RestService.getTime().then(res =\u0026gt; setTime(res)) 87 getCustomers() 88 }, []) 89 90 // @ts-ignore 91 Home.propTypes = { 92 title: PropTypes.string, 93 onClick: PropTypes.func, 94 } 95 96 return ( 97 \u0026lt;\u0026gt; 98 \u0026lt;MenuBar/\u0026gt; 99 \u0026lt;br/\u0026gt; 100 \u0026lt;Container\u0026gt; 101 \u0026lt;Row\u0026gt; 102 \u0026lt;Col className={\u0026#34;text-center\u0026#34;}\u0026gt; 103 \u0026lt;p className=\u0026#34;text-end\u0026#34;\u0026gt;Server Time : {time}\u0026lt;/p\u0026gt; 104 \u0026lt;/Col\u0026gt; 105 \u0026lt;/Row\u0026gt; 106 \u0026lt;br/\u0026gt; 107 108 {flashMsg.success \u0026amp;\u0026amp; ( 109 \u0026lt;Row\u0026gt; 110 \u0026lt;Col\u0026gt; 111 \u0026lt;Alert key=\u0026#34;home-flash\u0026#34; variant=\u0026#34;success\u0026#34;\u0026gt; 112 {flashMsg.success} 113 \u0026lt;/Alert\u0026gt; 114 \u0026lt;/Col\u0026gt; 115 \u0026lt;/Row\u0026gt; 116 )} 117 118 \u0026lt;Row\u0026gt; 119 \u0026lt;Col className={\u0026#34;text-center\u0026#34;}\u0026gt; 120 \u0026lt;h2\u0026gt;Customers\u0026lt;/h2\u0026gt; 121 \u0026lt;/Col\u0026gt; 122 \u0026lt;/Row\u0026gt; 123 \u0026lt;br/\u0026gt; 124 125 \u0026lt;Row\u0026gt; 126 \u0026lt;Col md={\u0026#34;4\u0026#34;}\u0026gt; 127 \u0026lt;Form onSubmit={onSubmit}\u0026gt; 128 \u0026lt;Form.Group controlId=\u0026#34;formFirstName\u0026#34; 
className={\u0026#34;mb-3\u0026#34;}\u0026gt; 129 \u0026lt;Form.Label\u0026gt;First Name\u0026lt;/Form.Label\u0026gt; 130 \u0026lt;Form.Control type=\u0026#34;text\u0026#34; placeholder=\u0026#34;Enter First Name\u0026#34; name=\u0026#34;firstName\u0026#34; 131 value={customer.firstName} onChange={handleChange}/\u0026gt; 132 \u0026lt;Form.Text className=\u0026#34;text-muted\u0026#34;\u0026gt; 133 Enter first name! 134 \u0026lt;/Form.Text\u0026gt; 135 \u0026lt;/Form.Group\u0026gt; 136 137 \u0026lt;Form.Group controlId=\u0026#34;formLastName\u0026#34; className={\u0026#34;mb-3\u0026#34;}\u0026gt; 138 \u0026lt;Form.Label\u0026gt;LastName\u0026lt;/Form.Label\u0026gt; 139 \u0026lt;Form.Control type=\u0026#34;text\u0026#34; placeholder=\u0026#34;LastName\u0026#34; name=\u0026#34;lastName\u0026#34; 140 value={customer.lastName} onChange={handleChange}/\u0026gt; 141 \u0026lt;/Form.Group\u0026gt; 142 143 \u0026lt;Button variant=\u0026#34;primary\u0026#34; type=\u0026#34;submit\u0026#34;\u0026gt; 144 Submit 145 \u0026lt;/Button\u0026gt; 146 \u0026lt;/Form\u0026gt; 147 \u0026lt;/Col\u0026gt; 148 \u0026lt;Col md={\u0026#34;8\u0026#34;}\u0026gt; 149 \u0026lt;Table striped bordered hover\u0026gt; 150 \u0026lt;thead\u0026gt; 151 \u0026lt;tr\u0026gt; 152 \u0026lt;th\u0026gt;First Name\u0026lt;/th\u0026gt; 153 \u0026lt;th\u0026gt;Last Name\u0026lt;/th\u0026gt; 154 \u0026lt;th\u0026gt;Action\u0026lt;/th\u0026gt; 155 \u0026lt;/tr\u0026gt; 156 \u0026lt;/thead\u0026gt; 157 \u0026lt;tbody\u0026gt; 158 {customers.map((customer: any) =\u0026gt; ( 159 \u0026lt;tr key={customer.id}\u0026gt; 160 \u0026lt;td\u0026gt;{customer.firstName}\u0026lt;/td\u0026gt; 161 \u0026lt;td\u0026gt;{customer.lastName}\u0026lt;/td\u0026gt; 162 \u0026lt;td\u0026gt;\u0026lt;Trash onClick={() =\u0026gt; deleteCustomer(customer.id)} 163 style={{color: \u0026#39;red\u0026#39;, cursor: \u0026#39;pointer\u0026#39;}}/\u0026gt;\u0026lt;/td\u0026gt; 164 \u0026lt;/tr\u0026gt; 165 ))} 166 \u0026lt;/tbody\u0026gt; 167 
\u0026lt;/Table\u0026gt; 168 \u0026lt;/Col\u0026gt; 169 \u0026lt;/Row\u0026gt; 170 \u0026lt;/Container\u0026gt; 171 \u0026lt;/\u0026gt; 172 ) 173} 174 175export default Home 1import MenuBar from \u0026#34;../components/MenuBar\u0026#34;; 2import {Col, Container, Row} from \u0026#34;react-bootstrap\u0026#34;; 3import {useEffect, useState} from \u0026#34;react\u0026#34;; 4import RestService from \u0026#34;../services/RestService\u0026#34; 5import {useNavigate} from \u0026#39;react-router-dom\u0026#39;; 6import {Chart} from \u0026#34;react-google-charts\u0026#34;; 7import AuthService from \u0026#34;../services/AuthService\u0026#34;; 8 9function ChartApp() { 10 11 let navigate = useNavigate(); 12 const [pieData, setPieData] = useState\u0026lt;any\u0026gt;([]) 13 const [barData, setBarData] = useState\u0026lt;any\u0026gt;([]) 14 const [lineData, setLineData] = useState\u0026lt;any\u0026gt;([]) 15 const [columnData, setColumnData] = useState\u0026lt;any\u0026gt;([]) 16 17 const pieOptions = { 18 title: \u0026#39;My Pie Chart\u0026#39;, 19 }; 20 21 const barOptions = { 22 title: \u0026#39;My Bar Chart\u0026#39;, 23 }; 24 25 const lineOptions = { 26 title: \u0026#39;My Line Chart\u0026#39;, 27 } 28 29 const columnOptions = { 30 title: \u0026#39;My Column Chart\u0026#39;, 31 } 32 33 const pieChart = () =\u0026gt; { 34 RestService.getPieDataFromServer().then(res =\u0026gt; { 35 const chartData = [[\u0026#39;Region\u0026#39;, \u0026#39;Amount\u0026#39;]] 36 for (let i = 0; i \u0026lt; res[1].length; i += 1) { 37 chartData.push([res[0][i], res[1][i]]) 38 } 39 setPieData({data: chartData}) 40 }) 41 } 42 43 const barChart = () =\u0026gt; { 44 RestService.getPieDataFromServer().then(res =\u0026gt; { 45 const chartData = [[\u0026#39;Region\u0026#39;, \u0026#39;Amount\u0026#39;]] 46 for (let i = 0; i \u0026lt; res[1].length; i += 1) { 47 chartData.push([res[0][i], res[1][i]]) 48 } 49 setBarData({data: chartData}) 50 }) 51 } 52 53 const lineChart = () =\u0026gt; { 54 
RestService.getPieDataFromServer().then(res =\u0026gt; { 55 const chartData = [[\u0026#39;Region\u0026#39;, \u0026#39;Amount\u0026#39;]] 56 for (let i = 0; i \u0026lt; res[1].length; i += 1) { 57 chartData.push([res[0][i], res[1][i]]) 58 } 59 setLineData({data: chartData}) 60 }) 61 } 62 63 const columnChart = () =\u0026gt; { 64 RestService.getColumnDataFromServer().then(res =\u0026gt; { 65 const chartData = [] 66 const rowData = [] 67 rowData.push(\u0026#34;Fruit\u0026#34;) 68 for (let i = 0; i \u0026lt; res[0][\u0026#34;data\u0026#34;].length; i++) { 69 rowData.push(res[0][\u0026#34;data\u0026#34;][i]); 70 } 71 chartData.push(rowData) 72 for (let i = 1; i \u0026lt; res.length; i++) { 73 const rowValData = [] 74 rowValData.push(res[i][\u0026#34;name\u0026#34;]); 75 for(let j = 0; j\u0026lt; res[i][\u0026#34;data\u0026#34;].length; j++) { 76 rowValData.push(res[i][\u0026#34;data\u0026#34;][j]); 77 } 78 chartData.push(rowValData) 79 } 80 setColumnData({data: chartData}) 81 }) 82 } 83 84 useEffect(() =\u0026gt; { 85 if (!AuthService.isAuthenticated()) { 86 navigate(\u0026#34;/login\u0026#34;); 87 return 88 } 89 pieChart() 90 barChart() 91 lineChart() 92 columnChart() 93 }, []) 94 95 return ( 96 \u0026lt;\u0026gt; 97 \u0026lt;MenuBar/\u0026gt; 98 \u0026lt;Container\u0026gt; 99 \u0026lt;br/\u0026gt; 100 \u0026lt;Row\u0026gt; 101 \u0026lt;Col md={\u0026#34;6\u0026#34;}\u0026gt; 102 \u0026lt;Chart 103 chartType=\u0026#34;PieChart\u0026#34; 104 data={pieData.data} 105 options={pieOptions} 106 width=\u0026#34;100%\u0026#34; 107 height=\u0026#34;400px\u0026#34; 108 legendToggle 109 /\u0026gt; 110 \u0026lt;/Col\u0026gt; 111 \u0026lt;Col md={\u0026#34;6\u0026#34;}\u0026gt; 112 \u0026lt;Chart 113 chartType=\u0026#34;BarChart\u0026#34; 114 data={barData.data} 115 options={barOptions} 116 width=\u0026#34;100%\u0026#34; 117 height=\u0026#34;400px\u0026#34; 118 legendToggle 119 /\u0026gt; 120 \u0026lt;/Col\u0026gt; 121 \u0026lt;/Row\u0026gt; 122 \u0026lt;br/\u0026gt; 123 
\u0026lt;br/\u0026gt; 124 \u0026lt;Row\u0026gt; 125 \u0026lt;Col md={\u0026#34;6\u0026#34;}\u0026gt; 126 \u0026lt;Chart 127 chartType=\u0026#34;LineChart\u0026#34; 128 data={lineData.data} 129 options={lineOptions} 130 width=\u0026#34;100%\u0026#34; 131 height=\u0026#34;400px\u0026#34; 132 legendToggle 133 /\u0026gt; 134 \u0026lt;/Col\u0026gt; 135 \u0026lt;Col md={\u0026#34;6\u0026#34;}\u0026gt; 136 \u0026lt;Chart 137 chartType=\u0026#34;ColumnChart\u0026#34; 138 data={columnData.data} 139 options={columnOptions} 140 width=\u0026#34;100%\u0026#34; 141 height=\u0026#34;400px\u0026#34; 142 legendToggle 143 /\u0026gt; 144 \u0026lt;/Col\u0026gt; 145 \u0026lt;/Row\u0026gt; 146 \u0026lt;/Container\u0026gt; 147 \u0026lt;/\u0026gt; 148 ) 149} 150 151export default ChartApp We will use the bootstrap 5 library and use the many components it provides.\n1import {Alert, Button, Col, Container, Form, Row} from \u0026#34;react-bootstrap\u0026#34;; 2import LoginBar from \u0026#34;../components/LoginBar\u0026#34;; 3import {useState} from \u0026#39;react\u0026#39; 4 5import AuthService from \u0026#34;../services/AuthService\u0026#34;; 6import {useNavigate} from \u0026#34;react-router-dom\u0026#34;; 7 8function Login() { 9 let navigate = useNavigate(); 10 11 const [cred, setCred] = useState({ 12 username: \u0026#39;\u0026#39;, 13 password: \u0026#39;\u0026#39;, 14 }) 15 16 const [flashMsg, setFlashMsg] = useState({ 17 success: \u0026#39;\u0026#39;, 18 error: \u0026#39;\u0026#39; 19 }) 20 21 const handleChange = (e: any) =\u0026gt; { 22 const value = e.target.value; 23 setCred({ 24 ...cred, 25 [e.target.name]: value 26 }); 27 } 28 29 const onSubmit = (e: any) =\u0026gt; { 30 e.preventDefault() 31 if (!cred.username || !cred.password) { 32 alert(\u0026#39;Please enter the values\u0026#39;) 33 return 34 } 35 AuthService.login(cred).then((status) =\u0026gt; { 36 if (status) { 37 setCred({ 38 username: \u0026#39;\u0026#39;, 39 password: \u0026#39;\u0026#39; 40 }) 41 
navigate(\u0026#39;/\u0026#39;); 42 } else { 43 setFlashMsg({ 44 ...flashMsg, 45 \u0026#39;error\u0026#39;: \u0026#39;Login Failed!\u0026#39; 46 }) 47 } 48 }, error =\u0026gt; { 49 console.log(\u0026#34;Error on login submit!\u0026#34;) 50 }) 51 } 52 53 return ( 54 \u0026lt;\u0026gt; 55 \u0026lt;LoginBar/\u0026gt; 56 \u0026lt;Container\u0026gt; 57 \u0026lt;Form style={{maxWidth: \u0026#39;400px\u0026#39;, margin: \u0026#39;auto\u0026#39;}} onSubmit={onSubmit}\u0026gt; 58 \u0026lt;br/\u0026gt; 59 \u0026lt;br/\u0026gt; 60 \u0026lt;h2\u0026gt;Login\u0026lt;/h2\u0026gt; 61 62 \u0026lt;Form.Group controlId=\u0026#34;formUsername\u0026#34; className={\u0026#34;mb-3\u0026#34;}\u0026gt; 63 \u0026lt;Form.Label\u0026gt;Username\u0026lt;/Form.Label\u0026gt; 64 \u0026lt;Form.Control type=\u0026#34;username\u0026#34; placeholder=\u0026#34;Enter Username\u0026#34; name=\u0026#34;username\u0026#34; value={cred.username} 65 onChange={handleChange}/\u0026gt; 66 \u0026lt;Form.Text className=\u0026#34;text-muted\u0026#34;\u0026gt; 67 Enter AD user name! 
68 \u0026lt;/Form.Text\u0026gt; 69 \u0026lt;/Form.Group\u0026gt; 70 71 \u0026lt;Form.Group controlId=\u0026#34;formBasicPassword\u0026#34; className={\u0026#34;mb-3\u0026#34;}\u0026gt; 72 \u0026lt;Form.Label\u0026gt;Password\u0026lt;/Form.Label\u0026gt; 73 \u0026lt;Form.Control type=\u0026#34;password\u0026#34; placeholder=\u0026#34;Password\u0026#34; name=\u0026#34;password\u0026#34; value={cred.password} 74 onChange={handleChange}/\u0026gt; 75 \u0026lt;/Form.Group\u0026gt; 76 77 \u0026lt;Button variant=\u0026#34;primary\u0026#34; type=\u0026#34;submit\u0026#34;\u0026gt; 78 Submit 79 \u0026lt;/Button\u0026gt; 80 \u0026lt;br/\u0026gt; 81 \u0026lt;br/\u0026gt; 82 {flashMsg.error \u0026amp;\u0026amp; ( 83 \u0026lt;Row\u0026gt; 84 \u0026lt;Col\u0026gt; 85 \u0026lt;Alert key=\u0026#34;home-flash\u0026#34; variant=\u0026#34;danger\u0026#34;\u0026gt; 86 {flashMsg.error} 87 \u0026lt;/Alert\u0026gt; 88 \u0026lt;/Col\u0026gt; 89 \u0026lt;/Row\u0026gt; 90 )} 91 \u0026lt;/Form\u0026gt; 92 \u0026lt;/Container\u0026gt; 93 \u0026lt;/\u0026gt; 94 ) 95} 96 97export default Login Setup 1# Project 89 2 3SpringBoot Web + JWT + React.js + Bootstrap + Postgres + Google Charts 4 5[https://gitorko.github.io/spring-boot-reactjs/](https://gitorko.github.io/spring-boot-reactjs/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14 15node --version 16v16.16.0 17 18yarn --version 191.22.18 20``` 21 22### Postgres DB 23 24``` 25docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:9.6.10 26docker ps 27docker exec -it pg-container psql -U postgres -W postgres 28CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 29CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 30grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 31 32docker stop pg-container 33docker start pg-container 34``` 35 36### Dev 37 38To 
run the backend in dev mode. 39 40```bash 41./gradlew clean build 42./gradlew bootRun 43``` 44 45To Run UI in dev mode 46 47```bash 48cd ui 49yarn install 50yarn build 51yarn start 52``` 53 54Open [http://localhost:3000](http://localhost:3000) 55 56### Prod 57 58To run as a single jar, both UI and backend are bundled to single uber jar. 59 60```bash 61./gradlew cleanBuild 62cd build/libs 63java -jar project89-1.0.0.jar 64``` 65 66Open [http://localhost:8080/](http://localhost:8080/) 67 68``` 69user: admin 70pwd: admin@123 71 72user: user 73pwd: user@123 74``` 75 76### Docker 77 78```bash 79./gradlew cleanBuild 80docker build -f docker/Dockerfile --force-rm -t project89:1.0.0 . 81docker images |grep project89 82docker tag project89:1.0.0 gitorko/project89:1.0.0 83docker push gitorko/project89:1.0.0 84docker-compose -f docker/docker-compose.yml up 85``` 86 87### Commands 88 89Commands to create new ui project if needed 90 91```bash 92yarn create react-app ui --template typescript 93yarn add jsonwebtoken types/jsonwebtoken 94yarn add react-router-dom 95yarn add react-bootstrap bootstrap 96yarn add react-chartjs-2 chart.js 97yarn add react-bootstrap-icons 98yarn add prop-types 99``` 100 101proxy is added to package.json to allow the requests to be redirected to the backend 102 103```bash 104\u0026#34;proxy\u0026#34;: \u0026#34;http://localhost:8080/\u0026#34; 105``` Testing 1curl --location --request POST \u0026#39;http://localhost:8080/api/login\u0026#39; \\ 2--header \u0026#39;Content-Type: application/json\u0026#39; \\ 3--data-raw \u0026#39;{ 4 \u0026#34;username\u0026#34;: \u0026#34;admin\u0026#34;, 5 \u0026#34;password\u0026#34;: \u0026#34;admin@123\u0026#34; 6}\u0026#39; 1curl --location --request GET \u0026#39;http://localhost:8080/api/time\u0026#39; \\ 2--header \u0026#39;Authorization: Bearer \u0026lt;TOKEN\u0026gt;\u0026#39; References 
https://react-bootstrap.github.io/\nhttps://react-google-charts.com/\n","link":"https://gitorko.github.io/post/spring-reactjs/","section":"post","tags":["spring","spring-boot","jwt","reactjs","google-chart","bootstrap","spring-security"],"title":"Spring - ReactJS"},{"body":"","link":"https://gitorko.github.io/tags/angular/","section":"tags","tags":null,"title":"Angular"},{"body":"Chat Server developed with Spring Boot, Websocket and Angular (Clarity) frontend.\nGithub: https://github.com/gitorko/project92\nQuick Overview To deploy the application in a single command, clone the project, make sure no conflicting docker containers or ports are running and then run\n1git clone https://github.com/gitorko/project92 2cd project92 3docker-compose -f docker/docker-compose.yml up Requirements Realtime data fetch from server via bidirectional communication is one of the key requirements for a chat server. To fetch information from the server continuously we can use the following approaches.\nShort-Polling - Client continuously asks the server for new data. Long-Polling - Client continuously asks the server for new data, but server waits for a few seconds and if data becomes available by then it will return the data. Websocket - HTTP connection is upgraded to bidirectional connection. Server Sent Events - HTTP connection is kept open by the server and data is pushed to client continuously over it. Websocket Server Sent Event Long-Poll Full-duplex,Bidirectional Half-duplex,Unidirectional Half-duplex,Unidirectional Server Push \u0026amp; Client Send Server Push Client Pull Text + Binary Text Text + Binary 65,536 (max number of TCP ports) 6-8 parallel per domain Based on threads available Connect will open the websocket connection \u0026amp; disconnect should terminate the session. Two users should be able to send and receive messages. 
Implementation Design Code You can enable plain websockets via @EnableWebSocket however in the example below we are using STOMP over WebSocket protocol by using @EnableWebSocketMessageBroker. STOMP is a subprotocol operating on top of the lower-level WebSocket. Here we create an in-memory message broker for sending and receiving messages. Instead of the annotation @SendTo, you can also use SimpMessagingTemplate which you can autowire inside your controller.\n1package com.demo.project92.controller; 2 3import java.time.LocalDateTime; 4 5import com.demo.project92.domain.ChatMessage; 6import lombok.RequiredArgsConstructor; 7import lombok.extern.slf4j.Slf4j; 8import org.springframework.messaging.handler.annotation.MessageMapping; 9import org.springframework.messaging.handler.annotation.SendTo; 10import org.springframework.messaging.simp.SimpMessageHeaderAccessor; 11import org.springframework.messaging.simp.SimpMessagingTemplate; 12import org.springframework.web.bind.annotation.RestController; 13 14@RestController 15@RequiredArgsConstructor 16@Slf4j 17class HomeController { 18 19 @MessageMapping(\u0026#34;/send/message\u0026#34;) 20 @SendTo(\u0026#34;/message\u0026#34;) 21 public ChatMessage broadcastMessage(SimpMessageHeaderAccessor sha, ChatMessage chat) { 22 chat.setFrom(sha.getUser().getName()); 23 chat.setSentAt(LocalDateTime.now().toString()); 24 log.info(\u0026#34;Received message: {}\u0026#34;, chat); 25 return chat; 26 } 27} 1package com.demo.project92.config; 2 3import org.springframework.context.annotation.Configuration; 4import org.springframework.messaging.simp.config.MessageBrokerRegistry; 5import org.springframework.web.socket.config.annotation.EnableWebSocketMessageBroker; 6import org.springframework.web.socket.config.annotation.StompEndpointRegistry; 7import org.springframework.web.socket.config.annotation.WebSocketMessageBrokerConfigurer; 8 9@Configuration 10@EnableWebSocketMessageBroker 11public class WebSocketConfig implements 
WebSocketMessageBrokerConfigurer { 12 13 @Override 14 public void configureMessageBroker(MessageBrokerRegistry config) { 15 config.setApplicationDestinationPrefixes(\u0026#34;/app\u0026#34;) 16 .enableSimpleBroker(\u0026#34;/message\u0026#34;); 17 } 18 19 @Override 20 public void registerStompEndpoints(StompEndpointRegistry registry) { 21 //SockJS is used to enable fallback options if browsers don’t support websocket. 22 registry.addEndpoint(\u0026#34;/chat-app\u0026#34;) 23 .setAllowedOrigins(\u0026#34;http://localhost:4200\u0026#34;) 24 .setHandshakeHandler(new CustomHandshakeHandler()) // Set custom handshake handler 25 .withSockJS(); 26 } 27} 1\u0026lt;div class=\u0026#34;content-container\u0026#34;\u0026gt; 2 \u0026lt;div class=\u0026#34;content-area\u0026#34;\u0026gt; 3 \u0026lt;div class=\u0026#34;clr-row\u0026#34;\u0026gt; 4 \u0026lt;div class=\u0026#34;clr-col-12\u0026#34;\u0026gt; 5 \u0026lt;p\u0026gt;Online status: {{chatStatus}}\u0026lt;/p\u0026gt; 6 \u0026lt;button class=\u0026#34;btn\u0026#34; (click)=\u0026#34;connect()\u0026#34; [disabled]=\u0026#34;chatStatus === \u0026#39;Connected\u0026#39;\u0026#34;\u0026gt;Connect\u0026lt;/button\u0026gt; 7 \u0026lt;button class=\u0026#34;btn\u0026#34; (click)=\u0026#34;disconnect()\u0026#34; [disabled]=\u0026#34;chatStatus !== \u0026#39;Connected\u0026#39;\u0026#34;\u0026gt;Disconnect\u0026lt;/button\u0026gt; 8 \u0026lt;br/\u0026gt; 9 \u0026lt;form class=\u0026#34;clr-form clr-form-horizontal\u0026#34;\u0026gt; 10 \u0026lt;div class=\u0026#34;clr-form-control\u0026#34;\u0026gt; 11 12 \u0026lt;label for=\u0026#34;message\u0026#34; class=\u0026#34;clr-control-label\u0026#34;\u0026gt;Message\u0026lt;/label\u0026gt; 13 \u0026lt;div class=\u0026#34;clr-control-container\u0026#34;\u0026gt; 14 \u0026lt;div class=\u0026#34;clr-input-wrapper\u0026#34;\u0026gt; 15 \u0026lt;input [(ngModel)]=\u0026#34;message\u0026#34; type=\u0026#34;text\u0026#34; id=\u0026#34;message\u0026#34; name=\u0026#34;message\u0026#34; 
size=\u0026#34;50\u0026#34; 16 placeholder=\u0026#34;message\u0026#34; class=\u0026#34;clr-input\u0026#34;/\u0026gt; 17 \u0026lt;/div\u0026gt; 18 \u0026lt;/div\u0026gt; 19 \u0026lt;/div\u0026gt; 20 \u0026lt;div class=\u0026#34;clr-form-control\u0026#34;\u0026gt; 21 \u0026lt;div class=\u0026#34;clr-control-container\u0026#34;\u0026gt; 22 \u0026lt;button type=\u0026#34;submit\u0026#34; class=\u0026#34;btn btn-primary btn-block\u0026#34; (click)=\u0026#34;sendMessage()\u0026#34; 23 [disabled]=\u0026#34;chatStatus !== \u0026#39;Connected\u0026#39;\u0026#34;\u0026gt;Send 24 \u0026lt;/button\u0026gt; 25 \u0026lt;/div\u0026gt; 26 \u0026lt;/div\u0026gt; 27 \u0026lt;/form\u0026gt; 28 29 \u0026lt;table class=\u0026#34;table\u0026#34;\u0026gt; 30 \u0026lt;caption\u0026gt; 31 Chat Messages 32 \u0026lt;/caption\u0026gt; 33 \u0026lt;thead\u0026gt; 34 \u0026lt;th class=\u0026#34;left\u0026#34;\u0026gt;From\u0026lt;/th\u0026gt; 35 \u0026lt;th class=\u0026#34;left\u0026#34;\u0026gt;Message\u0026lt;/th\u0026gt; 36 \u0026lt;th class=\u0026#34;left\u0026#34;\u0026gt;Sent At\u0026lt;/th\u0026gt; 37 \u0026lt;/thead\u0026gt; 38 \u0026lt;tbody\u0026gt; 39 \u0026lt;tr *ngFor=\u0026#34;let msg of messageList\u0026#34;\u0026gt; 40 \u0026lt;td class=\u0026#34;left\u0026#34;\u0026gt;{{msg.from}}\u0026lt;/td\u0026gt; 41 \u0026lt;td class=\u0026#34;left\u0026#34;\u0026gt;{{msg.text}}\u0026lt;/td\u0026gt; 42 \u0026lt;td class=\u0026#34;left\u0026#34;\u0026gt;{{msg.sentAt}}\u0026lt;/td\u0026gt; 43 \u0026lt;/tr\u0026gt; 44 \u0026lt;/tbody\u0026gt; 45 \u0026lt;/table\u0026gt; 46 47 \u0026lt;/div\u0026gt; 48 \u0026lt;/div\u0026gt; 49 \u0026lt;/div\u0026gt; 50\u0026lt;/div\u0026gt; 1import {Component, OnInit} from \u0026#39;@angular/core\u0026#39;; 2import {Chat} from \u0026#34;../models/chat\u0026#34;; 3import * as SockJS from \u0026#34;sockjs-client\u0026#34;; 4import {Stomp} from \u0026#34;@stomp/stompjs\u0026#34;; 5 6@Component({ 7 selector: \u0026#39;app-home\u0026#39;, 8 templateUrl: 
\u0026#39;./home.component.html\u0026#39;, 9 styleUrls: [\u0026#39;./home.component.css\u0026#39;] 10}) 11export class HomeComponent implements OnInit { 12 13 message: string = \u0026#39;\u0026#39;; 14 chatStatus: string = \u0026#39;Disconnected\u0026#39;; 15 16 stompClient: any; 17 public messageList: Chat[] = []; 18 19 constructor() { 20 } 21 22 ngOnInit(): void { 23 } 24 25 connect() { 26 console.log(window.location.href); 27 const serverUrl = \u0026#39;http://localhost:8080/chat-app\u0026#39;; 28 const ws = new SockJS(serverUrl); 29 this.stompClient = Stomp.over(ws); 30 const that = this; 31 this.stompClient.connect({}, function (frame: any) { 32 that.chatStatus = \u0026#39;Connected\u0026#39;; 33 that.stompClient.subscribe(\u0026#39;/message\u0026#39;, (message: any) =\u0026gt; { 34 if (message.body) { 35 that.messageList.push(JSON.parse(message.body)); 36 } 37 }); 38 }, this.errorCallBack); 39 } 40 41 disconnect() { 42 if (this.stompClient !== null) { 43 this.stompClient.disconnect(); 44 } 45 this.chatStatus = \u0026#39;Disconnected\u0026#39;; 46 console.log(\u0026#34;Disconnected\u0026#34;); 47 } 48 49 sendMessage() { 50 if (this.message) { 51 let chat: Chat = new Chat(); 52 chat.text = this.message; 53 console.log(\u0026#34;Sending chat: \u0026#34; + chat); 54 this.stompClient.send(\u0026#39;/app/send/message\u0026#39;, {}, JSON.stringify(chat)); 55 this.message = \u0026#39;\u0026#39;; 56 } 57 } 58 59 errorCallBack(error: any) { 60 console.log(\u0026#34;errorCallBack -\u0026gt; \u0026#34; + error) 61 setTimeout(() =\u0026gt; { 62 this.connect(); 63 }, 5000); 64 } 65} Setup 1# Project 92 2 3Chat Server 4 5[https://gitorko.github.io/chat-server/](https://gitorko.github.io/chat-server/) 6 7### Version 8 9Check version 10 11```bash 12$java -version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14 15$node --version 16v16.16.0 17 18$yarn --version 191.22.18 20``` 21 22### Dev 23 24To Run backend in dev mode 25 26```bash 27./gradlew clean build 
28./gradlew bootRun 29``` 30 31To Run UI in dev mode 32 33```bash 34cd ui 35yarn install 36yarn build 37yarn start 38``` 39 40Open [http://localhost:4200](http://localhost:4200) 41 42### Prod 43 44To run as a single jar, both UI and backend are bundled to single uber jar. 45 46```bash 47./gradlew cleanBuild 48cd project92/build/libs 49java -jar project92-1.0.0.jar 50``` 51 52Open [http://localhost:8080](http://localhost:8080) 53 54### Docker 55 56```bash 57./gradlew cleanBuild 58docker build -f docker/Dockerfile --force-rm -t project92:1.0.0 . 59docker images |grep project92 60docker tag project92:1.0.0 gitorko/project92:1.0.0 61docker push gitorko/project92:1.0.0 62docker-compose -f docker/docker-compose.yml up 63``` References https://spring.io/guides/gs/messaging-stomp-websocket/\nhttps://linuxhint.com/websockets-http-2-sse-compared/\nhttps://www.toptal.com/java/stomp-spring-boot-websocket\n","link":"https://gitorko.github.io/post/chat-server/","section":"post","tags":["websocket","server-sent-event","angular","clarity","springboot"],"title":"Chat Server"},{"body":"","link":"https://gitorko.github.io/tags/clarity/","section":"tags","tags":null,"title":"Clarity"},{"body":"","link":"https://gitorko.github.io/tags/server-sent-event/","section":"tags","tags":null,"title":"Server-Sent-Event"},{"body":"","link":"https://gitorko.github.io/tags/springboot/","section":"tags","tags":null,"title":"Springboot"},{"body":"","link":"https://gitorko.github.io/tags/websocket/","section":"tags","tags":null,"title":"Websocket"},{"body":"","link":"https://gitorko.github.io/categories/websocket/","section":"categories","tags":null,"title":"WebSocket"},{"body":"Apache Superset is a modern data exploration and visualization platform.\nGithub: https://github.com/gitorko/project93\nWe will use a postgres db with a sample db and then create some charts around it in apache superset.\nPostgres Setup postgres \u0026amp; seed the employee database with some data.\n1docker run -p 5432:5432 
--name pg-container -e POSTGRES_PASSWORD=password -d postgres:9.6.10 2docker ps 3docker exec -it pg-container psql -U postgres -W postgres 4CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 5CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 6GRANT ALL PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 7 8docker stop pg-container 9docker start pg-container 1docker exec -i pg-container psql -U postgres \u0026lt; employees.sql 2docker exec -it pg-container psql -U postgres -W postgres 3\\c test-db; 4GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO \u0026#34;test\u0026#34;; 1pwd: password Download the employee.sql file https://github.com/gitorko/project93/blob/main/employees.sql.zip\nApache Superset Create the Dockerfile. Pick the python database driver you need to install. In this case its postgres.\n{% ghcode https://github.com/gitorko/project93/blob/main/Dockerfile %}\nRun the following commands to bring up the superset server\n1docker build -t superset-image . 2docker run -d -p 8080:8088 --name my-superset superset-image 3docker exec -it my-superset superset fab create-admin \\ 4 --username admin \\ 5 --firstname Superset \\ 6 --lastname Admin \\ 7 --email admin@superset.com \\ 8 --password admin 9docker exec -it my-superset superset db upgrade 10docker exec -it my-superset superset init If the docker build fails during pip install, update the dns configuration on docker \u0026amp; restart docker\nLogin to the server\nhttp://localhost:8080/login/\n1user: admin 2pwd: admin You can also load sample database and charts\n1docker exec -it my-superset superset load_examples For the next restart you just need to start the container\n1docker start my-superset Goto Data -\u0026gt; Databases and add the postgres db. 
Ensure to give the IP address of the machine and not localhost.\n1postgresql://test:test@123@10.104.66.186:5432/test-db Click on datasets and add the tables.\nCreate a new chart, save the chart and create a dashboard from the chart.\nReferences https://hub.docker.com/r/apache/superset\nhttps://superset.apache.org/\n","link":"https://gitorko.github.io/post/apache-superset/","section":"post","tags":["apache-superset","visualization","postgres"],"title":"Apache Superset"},{"body":"","link":"https://gitorko.github.io/tags/apache-superset/","section":"tags","tags":null,"title":"Apache-Superset"},{"body":"","link":"https://gitorko.github.io/tags/visualization/","section":"tags","tags":null,"title":"Visualization"},{"body":"","link":"https://gitorko.github.io/categories/visualization/","section":"categories","tags":null,"title":"Visualization"},{"body":"","link":"https://gitorko.github.io/tags/etl/","section":"tags","tags":null,"title":"ETL"},{"body":"","link":"https://gitorko.github.io/categories/etl/","section":"categories","tags":null,"title":"ETL"},{"body":"","link":"https://gitorko.github.io/tags/jenkins/","section":"tags","tags":null,"title":"Jenkins"},{"body":"Spring boot application integrated with jenkins pipeline for data processing jobs.\nGithub: https://github.com/gitorko/project84\nJenkins Jenkins is mostly used for setting up CI/CD or build pipelines. Here we will use it to setup a data pipeline that can be used to orchestrate data processing jobs.\nRequirement Lets consider a company sells paint.\nSTAGE1: They get their orders from the field in file format to their FTP server. This has to be processed and uploaded to the db. As these are large files ability re-run jobs after fixing files is required. STAGE2: They receive their material supplier in file format to their FTP server. This has to be processed and uploaded to the db. As these are large files ability re-run jobs after fixing files is required. 
STAGE3: Once the order and material is uploaded to db, if the order can be fulfilled the paint color and quantity need to be grouped by city. STAGE4: Additional buffer needs to be added to cover any shortages. This job is a small job and can be run in parallel. STAGE5: Sales rep bonus point need to be added in case the offer is active. This job is a small job and can be run in parallel. STAGE6: Order needs to be sent to factory in each city. The features of jenkins that make it friendly for data processing are:\nLog view - Ability to look at logs across different stages Graph view - Ability to look at a run graphically. Input - Ability to provide input to job at run time. Scheduling - Ability to schedule jobs at periodic interval. Parallel execution - Ability to run jobs in parallel Agents load distribution - Ability to run the job on other agent machines distributing the load. Time to complete - Ability to see which job is running and history of runs. Time taken - Ability to view each stage time taken over long periods to identify trends in execution. Re-Run - Ability to re-run a particular stage of the failed job. Slack - Ability to notify users on slack after job completion. Pull from maven - Ability to download the jar from maven. Plugin support - Numerous plugin are available for jenkins. Code The backend job that needs to do the processing. It takes the input as arguments and processes each stage and writes the results to a postgres db.\nEnsure that each job can run in isolation and updates just one table. 2 stages should never update the same table. Ensure that it throws runtime exception in case of failure. Ensure logging is correctly added to identify the issue. Ensure the stage can be re-run many times. This is done by resetting the data. Change the value of BASE_PATH accordingly. 
1package com.demo.project84; 2 3import java.io.BufferedReader; 4import java.io.Serializable; 5import java.nio.file.Files; 6import java.nio.file.Path; 7import java.nio.file.Paths; 8import java.time.LocalDate; 9import java.util.HashMap; 10import java.util.List; 11import java.util.Map; 12import jakarta.persistence.Column; 13import jakarta.persistence.Entity; 14import jakarta.persistence.GeneratedValue; 15import jakarta.persistence.GenerationType; 16import jakarta.persistence.Id; 17 18import lombok.AllArgsConstructor; 19import lombok.Builder; 20import lombok.Data; 21import lombok.NoArgsConstructor; 22import lombok.RequiredArgsConstructor; 23import lombok.SneakyThrows; 24import lombok.extern.slf4j.Slf4j; 25import org.springframework.boot.CommandLineRunner; 26import org.springframework.boot.SpringApplication; 27import org.springframework.boot.autoconfigure.SpringBootApplication; 28import org.springframework.data.jpa.repository.JpaRepository; 29 30@SpringBootApplication 31@RequiredArgsConstructor 32@Slf4j 33public class Main implements CommandLineRunner { 34 private static final String BASE_PATH = \u0026#34;/Users/asurendra/code/pet/project84/\u0026#34;; 35 36 final OrderRepo orderRepo; 37 final MaterialRepo materialRepo; 38 final ProcessedRepo processedRepo; 39 final BonusRepo bonusRepo; 40 final FactoryRepo factoryRepo; 41 42 public static void main(String[] args) { 43 SpringApplication.run(Main.class, args); 44 } 45 46 @Override 47 public void run(String... 
args) throws Exception { 48 String caseType = args[0]; 49 switch (caseType) { 50 case \u0026#34;STAGE1\u0026#34;: 51 stage1(); 52 break; 53 case \u0026#34;STAGE2\u0026#34;: 54 stage2(); 55 break; 56 case \u0026#34;STAGE3\u0026#34;: 57 stage3(); 58 break; 59 case \u0026#34;STAGE4\u0026#34;: 60 stage4(); 61 break; 62 case \u0026#34;STAGE5\u0026#34;: 63 stage5(); 64 break; 65 case \u0026#34;STAGE6\u0026#34;: 66 stage6(); 67 break; 68 default: 69 throw new IllegalStateException(\u0026#34;Unexpected value: \u0026#34; + caseType); 70 } 71 } 72 73 /** 74 * Load order file to db. 75 */ 76 @SneakyThrows 77 private void stage1() { 78 log.info(\u0026#34;Loading orders to db\u0026#34;); 79 try { 80 orderRepo.deleteAll(); 81 Path path = Paths.get(BASE_PATH + \u0026#34;order-file.txt\u0026#34;); 82 try (BufferedReader reader = Files.newBufferedReader(path)) { 83 while (reader.ready()) { 84 String line = reader.readLine(); 85 log.info(line); 86 String[] split = line.split(\u0026#34;,\u0026#34;); 87 OrderDetail order = OrderDetail.builder() 88 .color(split[0]) 89 .quantity(Double.valueOf(split[1])) 90 .city(split[2]) 91 .salesRep(split[3]) 92 .orderDate(LocalDate.parse(split[4])) 93 .build(); 94 orderRepo.save(order); 95 } 96 } 97 log.info(\u0026#34;Loading orders completed\u0026#34;); 98 } catch (Exception ex) { 99 log.error(\u0026#34;ERROR: stage1\u0026#34;, ex); 100 throw new RuntimeException(\u0026#34;ERROR: stage1\u0026#34;); 101 } 102 } 103 104 /** 105 * Load material file to db. 
106 */ 107 @SneakyThrows 108 private void stage2() { 109 log.info(\u0026#34;Loading materials to db\u0026#34;); 110 try { 111 materialRepo.deleteAll(); 112 Path path = Paths.get(BASE_PATH + \u0026#34;material-file.txt\u0026#34;); 113 try (BufferedReader reader = Files.newBufferedReader(path)) { 114 while (reader.ready()) { 115 String line = reader.readLine(); 116 log.info(line); 117 String[] split = line.split(\u0026#34;,\u0026#34;); 118 MaterialDetail material = MaterialDetail.builder() 119 .color(split[0]) 120 .quantity(Double.valueOf(split[1])) 121 .orderDate(LocalDate.parse(split[2])) 122 .build(); 123 materialRepo.save(material); 124 } 125 } 126 log.info(\u0026#34;Loading orders completed\u0026#34;); 127 } catch (Exception ex) { 128 log.error(\u0026#34;ERROR: stage2\u0026#34;, ex); 129 throw new RuntimeException(\u0026#34;ERROR: stage2\u0026#34;); 130 } 131 } 132 133 /** 134 * Process orders if it can be fulfilled. 135 */ 136 private void stage3() { 137 log.info(\u0026#34;Processing orders\u0026#34;); 138 try { 139 processedRepo.deleteAll(); 140 bonusRepo.deleteAll(); 141 factoryRepo.deleteAll(); 142 Map\u0026lt;String, Double\u0026gt; cache = new HashMap\u0026lt;\u0026gt;(); 143 materialRepo.findAll().forEach(m -\u0026gt; { 144 cache.put(m.getColor(), m.getQuantity()); 145 }); 146 Map\u0026lt;String, Double\u0026gt; result = new HashMap\u0026lt;\u0026gt;(); 147 List\u0026lt;OrderDetail\u0026gt; orders = orderRepo.findAll(); 148 for (OrderDetail order : orders) { 149 Double balance = cache.get(order.getColor()); 150 if (order.getQuantity() \u0026lt; balance) { 151 balance = balance - order.getQuantity(); 152 cache.put(order.getColor(), balance); 153 String key = order.getColor() + \u0026#34;:\u0026#34; + order.getCity(); 154 Double count = result.containsKey(key) ? result.get(key) + order.getQuantity() : order.getQuantity(); 155 result.put(key, count); 156 //add to processed. 
157 } else { 158 log.info(\u0026#34;ERROR: stage3, will not be able to complete all order!\u0026#34;); 159 throw new RuntimeException(\u0026#34;ERROR: stage3, will not be able to complete all order!\u0026#34;); 160 } 161 } 162 result.forEach((k, v) -\u0026gt; { 163 String[] split = k.split(\u0026#34;\\\\:\u0026#34;); 164 processedRepo.save(ProcessedDetail.builder() 165 .color(split[0]) 166 .quantity(v) 167 .processDate(LocalDate.now()) 168 .city(split[1]) 169 .build()); 170 }); 171 log.info(\u0026#34;Processing orders completed\u0026#34;); 172 } catch (Exception ex) { 173 log.error(\u0026#34;ERROR: stage3\u0026#34;, ex); 174 throw new RuntimeException(\u0026#34;ERROR: stage3\u0026#34;); 175 } 176 } 177 178 /** 179 * Add buffer to order quantity to ensure no shortage. 180 */ 181 private void stage4() { 182 log.info(\u0026#34;Adding buffer\u0026#34;); 183 try { 184 factoryRepo.deleteAll(); 185 List\u0026lt;ProcessedDetail\u0026gt; processedDetail = processedRepo.findAll(); 186 processedDetail.forEach(p -\u0026gt; { 187 FactoryDetail factory = FactoryDetail.builder() 188 .color(p.getColor()) 189 .city(p.getCity()) 190 .processDate(LocalDate.now()) 191 .build(); 192 if (p.getQuantity() \u0026gt; 500) { 193 factory.setQuantity(p.getQuantity() + (p.getQuantity() * 0.30)); 194 } else if (p.getQuantity() \u0026gt; 200) { 195 factory.setQuantity(p.getQuantity() + (p.getQuantity() * 0.20)); 196 } else if (p.getQuantity() \u0026gt; 100) { 197 factory.setQuantity(p.getQuantity() + (p.getQuantity() * 0.10)); 198 p.setQuantity(p.getQuantity() + (p.getQuantity() * 0.10)); 199 } else { 200 p.setQuantity(p.getQuantity()); 201 } 202 factoryRepo.save(factory); 203 204 }); 205 log.info(\u0026#34;Adding buffer completed\u0026#34;); 206 } catch (Exception ex) { 207 log.error(\u0026#34;ERROR: stage4\u0026#34;, ex); 208 throw new RuntimeException(\u0026#34;ERROR: stage4\u0026#34;); 209 } 210 } 211 212 /** 213 * Add bonus points for sales rep. 
214 */ 215 private void stage5() { 216 log.info(\u0026#34;Adding Sales bonus\u0026#34;); 217 try { 218 bonusRepo.deleteAll(); 219 Map\u0026lt;String, Double\u0026gt; result = new HashMap\u0026lt;\u0026gt;(); 220 List\u0026lt;OrderDetail\u0026gt; orders = orderRepo.findAll(); 221 for (OrderDetail order : orders) { 222 String key = order.getSalesRep(); 223 Double count = result.containsKey(key) ? result.get(key) + order.getQuantity() : order.getQuantity(); 224 result.put(key, count); 225 } 226 227 result.forEach((k, v) -\u0026gt; { 228 if (v \u0026gt; 200) { 229 bonusRepo.save(BonusDetail.builder() 230 .salesRep(k) 231 .bonusPoints(5) 232 .orderDate(LocalDate.now()) 233 .build()); 234 } 235 if (v \u0026gt; 500) { 236 bonusRepo.save(BonusDetail.builder() 237 .salesRep(k) 238 .bonusPoints(15) 239 .orderDate(LocalDate.now()) 240 .build()); 241 } 242 }); 243 log.info(\u0026#34;Adding Sales bonus completed\u0026#34;); 244 } catch (Exception ex) { 245 log.error(\u0026#34;ERROR: stage5\u0026#34;, ex); 246 throw new RuntimeException(\u0026#34;ERROR: stage5\u0026#34;); 247 } 248 } 249 250 /** 251 * Notify factory to start production. 
252 */ 253 private void stage6() { 254 log.info(\u0026#34;Notifying factory\u0026#34;); 255 try { 256 List\u0026lt;ProcessedDetail\u0026gt; processedDetail = processedRepo.findAll(); 257 processedDetail.forEach(p -\u0026gt; { 258 log.info(\u0026#34;Notifiying factory: {}\u0026#34;, p); 259 }); 260 log.info(\u0026#34;Notifying factory completed\u0026#34;); 261 } catch (Exception ex) { 262 log.error(\u0026#34;ERROR: stage6\u0026#34;, ex); 263 throw new RuntimeException(\u0026#34;ERROR: stage6\u0026#34;); 264 } 265 } 266} 267 268interface BonusRepo extends JpaRepository\u0026lt;BonusDetail, Long\u0026gt; { 269} 270 271interface MaterialRepo extends JpaRepository\u0026lt;MaterialDetail, Long\u0026gt; { 272} 273 274interface OrderRepo extends JpaRepository\u0026lt;OrderDetail, Long\u0026gt; { 275} 276 277interface ProcessedRepo extends JpaRepository\u0026lt;ProcessedDetail, Long\u0026gt; { 278} 279 280interface FactoryRepo extends JpaRepository\u0026lt;FactoryDetail, Long\u0026gt; { 281} 282 283@Entity 284@Data 285@Builder 286@AllArgsConstructor 287@NoArgsConstructor 288class BonusDetail { 289 @Id 290 @GeneratedValue(strategy = GenerationType.AUTO) 291 @Column(name = \u0026#34;id\u0026#34;) 292 private Long id; 293 private String salesRep; 294 private Integer bonusPoints; 295 private LocalDate orderDate; 296} 297 298@Entity 299@Data 300@Builder 301@AllArgsConstructor 302@NoArgsConstructor 303class MaterialDetail implements Serializable { 304 305 @Id 306 @GeneratedValue(strategy = GenerationType.AUTO) 307 @Column(name = \u0026#34;id\u0026#34;) 308 private Long id; 309 private String color; 310 private Double quantity; 311 private LocalDate orderDate; 312 313} 314 315@Entity 316@Data 317@Builder 318@AllArgsConstructor 319@NoArgsConstructor 320class OrderDetail { 321 @Id 322 @GeneratedValue(strategy = GenerationType.AUTO) 323 @Column(name = \u0026#34;id\u0026#34;) 324 private Long id; 325 private String color; 326 private Double quantity; 327 private String city; 328 
private String salesRep; 329 private LocalDate orderDate; 330} 331 332@Entity 333@Data 334@Builder 335@AllArgsConstructor 336@NoArgsConstructor 337class ProcessedDetail { 338 @Id 339 @GeneratedValue(strategy = GenerationType.AUTO) 340 @Column(name = \u0026#34;id\u0026#34;) 341 private Long id; 342 private String color; 343 private Double quantity; 344 private String city; 345 private LocalDate processDate; 346} 347 348@Entity 349@Data 350@Builder 351@AllArgsConstructor 352@NoArgsConstructor 353class FactoryDetail { 354 @Id 355 @GeneratedValue(strategy = GenerationType.AUTO) 356 @Column(name = \u0026#34;id\u0026#34;) 357 private Long id; 358 private String color; 359 private Double quantity; 360 private String city; 361 private LocalDate processDate; 362} The properties file\n1spring: 2 main: 3 banner-mode: \u0026#34;off\u0026#34; 4 web-application-type: none 5 datasource: 6 driver-class-name: org.postgresql.Driver 7 url: jdbc:postgresql://localhost:5432/test-db 8 username: test 9 password: test@123 10 jpa: 11 show-sql: false 12 hibernate.ddl-auto: update 13 properties.hibernate.temp.use_jdbc_metadata_defaults: false 14 database-platform: org.hibernate.dialect.PostgreSQLDialect Jenkins To setup jenkins download the jenkins.war from https://www.jenkins.io/download/ and start the server. Once the server starts you will see the admin password in the console log. This will be used to setup jenkins for the first time. 
This will be a one time activity.\n1java -jar jenkins.war Open the below url\nhttp://localhost:8080/\nAlternate way to setup jenkins via docker\n1docker run --name my-jenkins -p 8080:8080 -p 50000:50000 jenkins/jenkins:lts-jdk11 Follow the steps to finish the configuration\nInstall the 'Pipeline Implementation for Blue Ocean plugin' to look at graphs\n1user: admin 2pwd: admin@123 Go to Dashboard and click on 'New Item' and create a pipeline, enter the script below and click on 'Build Now' and ensure it is successful.\n1pipeline { 2 agent any 3 4 stages { 5 stage(\u0026#39;STAGE1\u0026#39;) { 6 steps { 7 echo \u0026#39;STAGE1..\u0026#39; 8 } 9 } 10 stage(\u0026#39;STAGE2\u0026#39;) { 11 steps { 12 echo \u0026#39;STAGE2..\u0026#39; 13 } 14 } 15 } 16} If the setup is correct this test job should be successful.\nNow create 6 pipeline jobs and a master pipeline job. All stage jobs will be same as below but input param will change.\nstage1-job - STAGE1 stage2-job - STAGE2 stage3-job - STAGE3 stage4-job - STAGE4 stage5-job - STAGE5 stage6-job - STAGE6\nChange param accordingly\n1pipeline { 2 agent any 3 4 stages { 5 stage(\u0026#39;STAGE1\u0026#39;) { 6 steps { 7 dir (\u0026#34;/Users/asurendra/code/pet/project84/build/libs\u0026#34;) { 8 sh \u0026#34;java -jar project84-1.0.0.jar STAGE1\u0026#34; 9 } 10 } 11 } 12 } 13} data-job-pipeline job\n1pipeline { 2 agent any 3 parameters { 4 booleanParam(name: \u0026#34;BONUS_OFFER\u0026#34;, defaultValue: true) 5 } 6 stages { 7 stage(\u0026#39;STAGE1\u0026#39;) { 8 steps { 9 build job: \u0026#39;stage1-job\u0026#39; 10 } 11 } 12 stage(\u0026#39;STAGE2\u0026#39;) { 13 steps { 14 build job: \u0026#39;stage2-job\u0026#39; 15 } 16 } 17 stage(\u0026#39;STAGE3\u0026#39;) { 18 steps { 19 build job: \u0026#39;stage3-job\u0026#39; 20 } 21 } 22 stage(\u0026#34;FORK\u0026#34;) { 23 parallel { 24 stage(\u0026#39;STAGE4\u0026#39;) { 25 steps { 26 build job: \u0026#39;stage4-job\u0026#39; 27 } 28 } 29 stage(\u0026#39;STAGE5\u0026#39;) { 30 
//If bonus points are counted for sales then run this job. 31 when { expression { params.BONUS_OFFER } } 32 steps { 33 build job: \u0026#39;stage5-job\u0026#39; 34 } 35 } 36 } 37 } 38 stage(\u0026#39;STAGE6\u0026#39;) { 39 steps { 40 build job: \u0026#39;stage6-job\u0026#39; 41 } 42 } 43 44 } 45} Click on 'Build with Parameters' and select the input checkbox. If bonus offer is applicable STAGE5 is executed else it wont be executed.\nMonitor the job\nOnce the job is complete click on 'Pipeline graph' this shows the path taken graphically. You can also see the time take for each stage to complete. This can be useful to monitor the job over long time periods.\nRerun the job without the bonus offer checkbox, once completed you will see the graph shows the node with STAGE5 as skipped.\nLook at the 'Console Output' that track each jobs log output. You can drill down to each stage job and look at the log specific to that.\nNow lets make a stage fail and then fix the issue and re-run the stage.\nModify the material-file.txt and reduce the quantity to 10. Run the 'data-job-pipeline' job.\nSince the materials are less and order cant be fulfilled the pipeline will fail, you can now look at the logs and identify the issue.\nFix the file again by changing the value back to what it was. Click on 'Restart from Stage' and select STAGE2. 
We need to seed the material file again hence restarting at STAGE2.\nOnce the job is successful you will notice that it didnt run the STAGE1 job and only ran STAGE2 and onwards.\nYou can even schedule this job to run daily.\nSetup 1# Project61 2 3Jenkins Pipeline + Data processing 4 5[https://gitorko.github.io/jenkins-data-processing/](https://gitorko.github.io/jenkins-data-processing/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 17.0.3 2022-04-19 LTS 14``` 15 16### Postgres DB 17 18``` 19docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:9.6.10 20docker ps 21docker exec -it pg-container psql -U postgres -W postgres 22CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 23CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 24grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 25 26docker stop pg-container 27docker start pg-container 28``` 29 30### Dev 31 32Build the project \u0026amp; test if the jar works. 
33 34```bash 35./gradlew clean build 36cd project84/build/libs 37java -jar project84-1.0.0.jar STAGE1 38java -jar project84-1.0.0.jar STAGE2 39java -jar project84-1.0.0.jar STAGE3 40java -jar project84-1.0.0.jar STAGE4 41java -jar project84-1.0.0.jar STAGE5 42java -jar project84-1.0.0.jar STAGE6 43``` 44 45To truncate the tables 46 47```sql 48truncate order_detail; 49truncate material_detail; 50truncate processed_detail; 51truncate bonus_detail; 52truncate factory_detail; 53``` References https://github.com/jenkinsci/docker\nhttps://www.jenkins.io/doc/book/pipeline/\n","link":"https://gitorko.github.io/post/jenkins-data-processing/","section":"post","tags":["pipeline","ETL","jenkins"],"title":"Jenkins - Data Processing"},{"body":"","link":"https://gitorko.github.io/tags/pipeline/","section":"tags","tags":null,"title":"Pipeline"},{"body":"","link":"https://gitorko.github.io/tags/mongodb/","section":"tags","tags":null,"title":"Mongodb"},{"body":"Spring Reactive web application with angular clarity and \u0026amp; reactive mongo db. 
Creates uber jar to deploy.\nGithub: https://github.com/gitorko/project60\nQuick Overview To deploy the application in a single command, clone the project, make sure no conflicting docker containers or ports are running and then run\n1git clone https://github.com/gitorko/project60 2cd project60 3docker-compose -f docker/docker-compose.yml up Open http://localhost:8080/\nFeatures Clarity is an open source library that provides various Angular components.\nCode 1package com.demo.project60; 2 3import static org.springframework.web.reactive.function.server.RequestPredicates.GET; 4import static org.springframework.web.reactive.function.server.RouterFunctions.route; 5import static org.springframework.web.reactive.function.server.ServerResponse.ok; 6 7import java.util.Arrays; 8import java.util.List; 9import java.util.Random; 10 11import com.demo.project60.domain.Customer; 12import com.demo.project60.repository.CustomerRepository; 13import lombok.extern.slf4j.Slf4j; 14import org.springframework.beans.factory.annotation.Value; 15import org.springframework.boot.CommandLineRunner; 16import org.springframework.boot.SpringApplication; 17import org.springframework.boot.autoconfigure.SpringBootApplication; 18import org.springframework.context.annotation.Bean; 19import org.springframework.core.io.Resource; 20import org.springframework.http.MediaType; 21import org.springframework.web.reactive.function.server.RouterFunction; 22import org.springframework.web.reactive.function.server.ServerResponse; 23import reactor.core.publisher.Flux; 24 25@SpringBootApplication 26@Slf4j 27public class Main { 28 public static void main(String[] args) { 29 SpringApplication.run(Main.class, args); 30 } 31 32 @Bean 33 public CommandLineRunner seedData(CustomerRepository customerRepository) { 34 return args -\u0026gt; { 35 log.info(\u0026#34;Initializing repo!\u0026#34;); 36 List\u0026lt;String\u0026gt; city = Arrays.asList(\u0026#34;London\u0026#34;, \u0026#34;New York\u0026#34;, 
\u0026#34;Bangalore\u0026#34;); 37 Flux\u0026lt;Customer\u0026gt; customers = Flux.range(1, 5).map(i -\u0026gt; { 38 int randomIndex = new Random().nextInt(2 - 0 + 1) + 0; 39 return new Customer(null, \u0026#34;first_\u0026#34; + i, \u0026#34;last_\u0026#34; + i, city.get(randomIndex)); 40 }); 41 customerRepository.deleteAll() 42 .thenMany(customers.flatMap(customerRepository::save) 43 .thenMany(customerRepository.findAll())) 44 .subscribe(e -\u0026gt; log.info(e.toString())); 45 log.info(\u0026#34;Data seed completed!\u0026#34;); 46 }; 47 } 48} 1package com.demo.project60.repository; 2 3import com.demo.project60.domain.Customer; 4import org.springframework.data.mongodb.repository.ReactiveMongoRepository; 5 6public interface CustomerRepository extends ReactiveMongoRepository\u0026lt;Customer, String\u0026gt; { 7} 1spring: 2 main: 3 banner-mode: \u0026#34;off\u0026#34; 4 data: 5 mongodb: 6 database: test-db 7 username: test 8 password: test@123 9 host: localhost 10 port: 27017 11 authentication-database: admin 1import {Injectable} from \u0026#39;@angular/core\u0026#39;; 2import {HttpClient} from \u0026#39;@angular/common/http\u0026#39;; 3import {Observable} from \u0026#39;rxjs\u0026#39;; 4import {Customer} from \u0026#34;../models/customer\u0026#34;; 5 6@Injectable({ 7 providedIn: \u0026#39;root\u0026#39; 8}) 9export class RestService { 10 11 constructor(private http: HttpClient) { 12 } 13 14 public getCustomers(): Observable\u0026lt;Customer[]\u0026gt; { 15 return this.http.get\u0026lt;Customer[]\u0026gt;(\u0026#39;/api/customer\u0026#39;); 16 } 17 18 public saveCustomer(customer: Customer) { 19 return this.http.post(\u0026#39;/api/customer\u0026#39;, customer); 20 } 21 22 public deleteCustomer(id: any): Observable\u0026lt;any\u0026gt; { 23 return this.http.delete(\u0026#39;/api/customer/\u0026#39; + id); 24 } 25 26 public getTime(): Observable\u0026lt;string\u0026gt; { 27 return this.http.get\u0026lt;string\u0026gt;(\u0026#39;/api/time\u0026#39;); 28 } 29} 
1\u0026lt;div class=\u0026#34;content-container\u0026#34;\u0026gt; 2 \u0026lt;div class=\u0026#34;content-area\u0026#34;\u0026gt; 3 4 \u0026lt;div class=\u0026#34;clr-row\u0026#34;\u0026gt; 5 6 \u0026lt;div class=\u0026#34;clr-col-12\u0026#34;\u0026gt; 7 \u0026lt;p style=\u0026#34;text-align: center\u0026#34;\u0026gt; 8 \u0026lt;!-- interpolation \u0026amp; pipe --\u0026gt; 9 Server Time: {{currentTime | date:\u0026#39;dd-MM-yyyy\u0026#39; }} 10 \u0026lt;/p\u0026gt; 11 12 \u0026lt;h2 style=\u0026#34;text-align: center\u0026#34;\u0026gt;Customers\u0026lt;/h2\u0026gt; 13 14 \u0026lt;clr-datagrid\u0026gt; 15 \u0026lt;clr-dg-placeholder class=\u0026#34;content-center\u0026#34;\u0026gt;No Customers!\u0026lt;/clr-dg-placeholder\u0026gt; 16 \u0026lt;clr-dg-column [clrDgField]=\u0026#34;\u0026#39;id\u0026#39;\u0026#34;\u0026gt;ID\u0026lt;/clr-dg-column\u0026gt; 17 \u0026lt;clr-dg-column [clrDgField]=\u0026#34;\u0026#39;firstName\u0026#39;\u0026#34;\u0026gt;First Name\u0026lt;/clr-dg-column\u0026gt; 18 \u0026lt;clr-dg-column [clrDgField]=\u0026#34;\u0026#39;lastName\u0026#39;\u0026#34;\u0026gt;Last Name\u0026lt;/clr-dg-column\u0026gt; 19 \u0026lt;clr-dg-column [clrDgField]=\u0026#34;\u0026#39;city\u0026#39;\u0026#34;\u0026gt;City\u0026lt;/clr-dg-column\u0026gt; 20 \u0026lt;clr-dg-column\u0026gt;Action\u0026lt;/clr-dg-column\u0026gt; 21 \u0026lt;!-- structural directive --\u0026gt; 22 \u0026lt;clr-dg-row clr-dg-row *clrDgItems=\u0026#34;let customer of customers\u0026#34;\u0026gt; 23 \u0026lt;clr-dg-cell\u0026gt;{{customer.id}}\u0026lt;/clr-dg-cell\u0026gt; 24 \u0026lt;clr-dg-cell\u0026gt;{{customer.firstName}}\u0026lt;/clr-dg-cell\u0026gt; 25 \u0026lt;clr-dg-cell\u0026gt;{{customer.lastName}}\u0026lt;/clr-dg-cell\u0026gt; 26 \u0026lt;clr-dg-cell\u0026gt;{{customer.city}}\u0026lt;/clr-dg-cell\u0026gt; 27 \u0026lt;clr-dg-cell\u0026gt; 28 \u0026lt;cds-icon shape=\u0026#34;trash\u0026#34; style=\u0026#34;cursor: pointer; color: blue\u0026#34; 
(click)=\u0026#34;deleteCustomer(customer)\u0026#34;\u0026gt; 29 \u0026lt;/cds-icon\u0026gt; 30 \u0026lt;/clr-dg-cell\u0026gt; 31 \u0026lt;/clr-dg-row\u0026gt; 32 \u0026lt;clr-dg-footer\u0026gt;{{customers.length}} customers\u0026lt;/clr-dg-footer\u0026gt; 33 \u0026lt;/clr-datagrid\u0026gt; 34 35 \u0026lt;div class=\u0026#34;clr-col-12\u0026#34;\u0026gt; 36 \u0026lt;form class=\u0026#34;clr-form clr-form-horizontal\u0026#34;\u0026gt; 37 \u0026lt;div class=\u0026#34;clr-form-control\u0026#34;\u0026gt; 38 \u0026lt;label for=\u0026#34;firstName\u0026#34; class=\u0026#34;clr-control-label\u0026#34;\u0026gt;First Name\u0026lt;/label\u0026gt; 39 \u0026lt;div class=\u0026#34;clr-control-container\u0026#34;\u0026gt; 40 \u0026lt;div class=\u0026#34;clr-input-wrapper\u0026#34;\u0026gt; 41 \u0026lt;!-- two way data binding --\u0026gt; 42 \u0026lt;input type=\u0026#34;text\u0026#34; [(ngModel)]=\u0026#34;customer.firstName\u0026#34; id=\u0026#34;firstName\u0026#34; name=\u0026#34;firstName\u0026#34; 43 placeholder=\u0026#34;Placeholder\u0026#34; class=\u0026#34;clr-input\u0026#34;/\u0026gt; 44 \u0026lt;/div\u0026gt; 45 \u0026lt;/div\u0026gt; 46 \u0026lt;/div\u0026gt; 47 \u0026lt;div class=\u0026#34;clr-form-control\u0026#34;\u0026gt; 48 \u0026lt;label for=\u0026#34;lastName\u0026#34; class=\u0026#34;clr-control-label\u0026#34;\u0026gt;Last Name\u0026lt;/label\u0026gt; 49 \u0026lt;div class=\u0026#34;clr-control-container\u0026#34;\u0026gt; 50 \u0026lt;div class=\u0026#34;clr-input-wrapper\u0026#34;\u0026gt; 51 \u0026lt;input [(ngModel)]=\u0026#34;customer.lastName\u0026#34; type=\u0026#34;text\u0026#34; id=\u0026#34;lastName\u0026#34; name=\u0026#34;lastName\u0026#34; 52 placeholder=\u0026#34;Placeholder\u0026#34; class=\u0026#34;clr-input\u0026#34;/\u0026gt; 53 \u0026lt;/div\u0026gt; 54 \u0026lt;/div\u0026gt; 55 \u0026lt;/div\u0026gt; 56 \u0026lt;div class=\u0026#34;clr-form-control\u0026#34;\u0026gt; 57 \u0026lt;div class=\u0026#34;clr-control-container\u0026#34;\u0026gt; 58 
\u0026lt;!-- event binding --\u0026gt; 59 \u0026lt;button type=\u0026#34;submit\u0026#34; class=\u0026#34;btn btn-primary\u0026#34; (click)=\u0026#34;saveCustomer()\u0026#34;\u0026gt;Save\u0026lt;/button\u0026gt; 60 \u0026lt;/div\u0026gt; 61 \u0026lt;/div\u0026gt; 62 \u0026lt;/form\u0026gt; 63 \u0026lt;/div\u0026gt; 64 65 \u0026lt;/div\u0026gt; 66 \u0026lt;/div\u0026gt; 67 \u0026lt;/div\u0026gt; 68\u0026lt;/div\u0026gt; 1import {Component, OnInit} from \u0026#39;@angular/core\u0026#39;; 2import {Customer} from \u0026#34;../models/customer\u0026#34;; 3import {RestService} from \u0026#34;../services/rest.service\u0026#34;; 4import {ClarityIcons, trashIcon} from \u0026#34;@cds/core/icon\u0026#34;; 5 6@Component({ 7 selector: \u0026#39;app-home\u0026#39;, 8 templateUrl: \u0026#39;./home.component.html\u0026#39;, 9 styleUrls: [\u0026#39;./home.component.css\u0026#39;] 10}) 11export class HomeComponent implements OnInit { 12 13 customers: Customer[] = []; 14 customer: Customer = new Customer(); 15 currentTime = \u0026#39;\u0026#39;; 16 17 constructor(private restService: RestService) { 18 ClarityIcons.addIcons(trashIcon); 19 } 20 21 ngOnInit() { 22 this.getCustomers(); 23 } 24 25 getCustomers(): void { 26 this.customer = new Customer(); 27 this.restService.getCustomers().subscribe(data =\u0026gt; { 28 this.customers = data; 29 }); 30 this.restService.getTime().subscribe(data =\u0026gt; { 31 this.currentTime = data; 32 }); 33 } 34 35 saveCustomer(): void { 36 this.restService.saveCustomer(this.customer) 37 .subscribe(data =\u0026gt; { 38 this.getCustomers(); 39 }, error =\u0026gt; { 40 console.log(error); 41 }); 42 } 43 44 deleteCustomer(customer: Customer): void { 45 console.log(\u0026#39;delete: \u0026#39; + customer.id); 46 this.restService.deleteCustomer(customer.id) 47 .subscribe(data =\u0026gt; { 48 this.getCustomers(); 49 }, error =\u0026gt; { 50 console.log(error); 51 }); 52 } 53 54} Setup 1# Project 60 2 3Spring WebFlux \u0026amp; Angular, Reactive MongoDB, 
Clarity, Docker 4 5[https://gitorko.github.io/spring-webflux-angular/](https://gitorko.github.io/spring-webflux-angular/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 17.0.3 2022-04-19 LTS 14 15node --version 16v16.16.0 17 18yarn --version 191.22.18 20``` 21 22### Mongo DB 23 24```bash 25docker run --name my-mongo -e MONGO_INITDB_ROOT_USERNAME=test -e MONGO_INITDB_ROOT_PASSWORD=test@123 -p 27017:27017 -d mongo 26docker ps 27``` 28 29### Dev 30 31To run the backend in dev mode. 32 33```bash 34./gradlew clean build 35./gradlew bootRun 36``` 37 38To Run UI in dev mode 39 40```bash 41cd ui 42yarn install 43yarn build 44yarn start 45``` 46 47Open [http://localhost:4200/](http://localhost:4200/) 48 49### Prod 50 51To run as a single jar, both UI and backend are bundled to single uber jar. 52 53```bash 54./gradlew cleanBuild 55cd build/libs 56java -jar project60-1.0.0.jar 57``` 58 59Open [http://localhost:8080/](http://localhost:8080/) 60 61### Docker 62 63```bash 64./gradlew cleanBuild 65docker build -f docker/Dockerfile --force-rm -t project60:1.0.0 . 
66docker images |grep project60 67docker tag project60:1.0.0 gitorko/project60:1.0.0 68docker push gitorko/project60:1.0.0 69docker-compose -f docker/docker-compose.yml up 70``` 71 72## Commands 73 74```bash 75ng new ui 76cd ui 77yarn add @cds/core @clr/icons @clr/angular @clr/ui 78``` 79 80proxy.config.json redirects the client calls 81 82```json 83{ 84 \u0026#34;/api/*\u0026#34;: { 85 \u0026#34;target\u0026#34;: \u0026#34;http://localhost:8080/\u0026#34;, 86 \u0026#34;secure\u0026#34;: false, 87 \u0026#34;logLevel\u0026#34;: \u0026#34;debug\u0026#34; 88 } 89} 90``` 91 92Modify package.json file, change the start \u0026amp; build command to 93 94```bash 95\u0026#34;start\u0026#34;: \u0026#34;ng serve --proxy-config proxy.config.json --open\u0026#34;, 96\u0026#34;build\u0026#34;: \u0026#34;ng build --prod\u0026#34;, 97``` 98 99Update the routing.The useHash:true will be useful when we deploy the application in a single uber jar later. 100If we dont use this then the back button on the application will run into errors. It uses a hash based routing instead of the default location based routing. 101 102If you run into the error 103 104```bash 105Error: initial exceeded maximum budget. 
106``` 107 108Update the budget in angular.json file 109 110``` 111\u0026#34;maximumWarning\u0026#34;: \u0026#34;4mb\u0026#34;, 112\u0026#34;maximumError\u0026#34;: \u0026#34;5mb\u0026#34; 113``` References Angular Clartiy Spring Boot Spring Webflux\n","link":"https://gitorko.github.io/post/spring-webflux-angular/","section":"post","tags":["webflux","clarity","angular","mongodb"],"title":"Spring Webflux \u0026 Angular"},{"body":"","link":"https://gitorko.github.io/categories/spring-webflux/","section":"categories","tags":null,"title":"Spring-Webflux"},{"body":"Flash sale system developed with Spring Boot, Spring JPA, RabbitMQ and Angular (Clarity) frontend.\nGithub: https://github.com/gitorko/project90\nQuick Overview To deploy the application in a single command, clone the project, make sure no conflicting docker containers or ports are running and then run\n1git clone https://github.com/gitorko/project90 2cd project90 3docker-compose -f docker/docker-compose.yml up Open http://localhost:8080/\nRequirements A flash sale system that supports one item per user and reserving the item in a large scale flash sale.\nFunctional Requirements A flash sale system goes live on a particular date \u0026amp; time. Users should not be able to add to cart before that. There will be large number of users requesting to add the item to the cart at the specific moment in time. The items will be limited in stock. The 'add to cart' action must be honored in the order they were received. The first user to click on add to cart must get the item. As long as there are products each user requesting should get the item in the cart. Once the item is added to the cart rest of the process of checkout is beyond scope of this demo. A user can get just one item they should not be able to buy more than one item. User must be able to delete the item from the cart after which it should be available for other users. The authentication can be mocked to randomly assign a user to each browser instance. 
So each time you open the url in a different browser/tab it is assigned a unique user. User can logout and it will assign a new user. Non-Functional Requirements Latency to place the request should be low. System should be highly available \u0026amp; be able to handle burst of request traffic in short duration. System should scale well when number of users increases Implementation Design We will use a rabbitmq to queue the incoming burst of requests. Each request response time window will be kept as minimal as possible to avoid crashing the system under heavy load. Each user after placing the request to add to cart will be in wait state and query the status of his request. The backend and frontend bundle into a single uber jar that can be deployed on many servers there by providing ability to horizontally scale. The max limit of requests that can be served by a single instance then depend on the default tomcat thread pool size of 200 and the server configurations. If the user tries to book the item before sale begin date, it will fail. 
Add to cart will work only after the sale begins.\nIf the same user tries to book the item in 2 tabs only one will succeed, one user can buy only 1 item in the sale.\nTwo users can try to book the item at the same time each will be alloted a different item if its available.\nAfter adding item to cart user can remove the item from cart.\nAfter adding an item the client waits for the action to complete.\nCode 1package com.demo.project90.controller; 2 3import static com.demo.project90.config.Constant.ITEM_QUEUE; 4import static com.demo.project90.config.Constant.ITEM_SALE_NOT_STARTED_MSG; 5import static com.demo.project90.config.Constant.TOKEN_QUEUE; 6import static org.springframework.http.HttpStatus.NOT_FOUND; 7 8import java.time.Duration; 9import java.time.Instant; 10import java.util.UUID; 11 12import com.demo.project90.domain.Audit; 13import com.demo.project90.domain.Item; 14import com.demo.project90.model.QEvent; 15import com.demo.project90.model.QItem; 16import com.demo.project90.repo.AuditRepository; 17import com.demo.project90.repo.ItemRepository; 18import com.demo.project90.service.AuditService; 19import lombok.RequiredArgsConstructor; 20import lombok.extern.slf4j.Slf4j; 21import org.springframework.amqp.rabbit.connection.ConnectionFactory; 22import org.springframework.amqp.rabbit.core.RabbitTemplate; 23import org.springframework.web.bind.annotation.DeleteMapping; 24import org.springframework.web.bind.annotation.GetMapping; 25import org.springframework.web.bind.annotation.PathVariable; 26import org.springframework.web.bind.annotation.RestController; 27import org.springframework.web.server.ResponseStatusException; 28 29@RestController 30@Slf4j 31@RequiredArgsConstructor 32public class HomeController { 33 34 private final RabbitTemplate template; 35 private final ItemRepository itemRepo; 36 private final AuditRepository auditRepo; 37 private final AuditService auditService; 38 private final ConnectionFactory connectionFactory; 39 40 @GetMapping(value = 
\u0026#34;/api/user\u0026#34;) 41 public String getUser() { 42 return UUID.randomUUID().toString().substring(0, 7); 43 } 44 45 @GetMapping(value = \u0026#34;/api/items/count\u0026#34;) 46 public long getFreeItemCount() { 47 return itemRepo.countAllByCartOfIsNull(); 48 } 49 50 @GetMapping(value = \u0026#34;/api/cart/items/{username}\u0026#34;) 51 public Iterable\u0026lt;Item\u0026gt; getCartItems(@PathVariable String username) { 52 return itemRepo.findAllByCartOf(username); 53 } 54 55 @GetMapping(value = \u0026#34;/api/cart/{username}\u0026#34;) 56 public QEvent addCartItem(@PathVariable String username) { 57 Instant start = Instant.now(); 58 log.info(\u0026#34;username: {}\u0026#34;, username); 59 String token = UUID.randomUUID().toString(); 60 QEvent qEvent = QEvent.builder() 61 .user(username) 62 .token(token) 63 .attemptCount(0) 64 .build(); 65 if (!auditService.checkIfSaleStarted()) { 66 auditService.saveAudit(ITEM_SALE_NOT_STARTED_MSG, qEvent.getUser(), qEvent.getToken(), -1l, \u0026#34;FAIL\u0026#34;); 67 Instant finish = Instant.now(); 68 log.info(\u0026#34;Request rejected in: {} ms\u0026#34;, username, Duration.between(start, finish).toMillis()); 69 return qEvent; 70 } else { 71 template.convertAndSend(TOKEN_QUEUE, qEvent); 72 Instant finish = Instant.now(); 73 log.info(\u0026#34;Add to cart for {} took: {} ms\u0026#34;, username, Duration.between(start, finish).toMillis()); 74 return qEvent; 75 } 76 } 77 78 @DeleteMapping(value = \u0026#34;/api/cart/{username}/{id}\u0026#34;) 79 public boolean deleteCartItem(@PathVariable String username, @PathVariable Long id) { 80 itemRepo.findById(id).ifPresent(e -\u0026gt; { 81 //only user who owns the cart can delete 82 if (e.getCartOf().equals(username)) { 83 e.setCartOf(null); 84 e.setAddedOn(null); 85 itemRepo.save(e); 86 pushAvailableItem(QItem.builder().itemId(id).build()); 87 } 88 }); 89 90 return true; 91 } 92 93 @GetMapping(value = \u0026#34;/api/audit/{token}\u0026#34;) 94 public Audit 
getTokenMessage(@PathVariable String token) { 95 if (auditRepo.findByToken(token).isPresent()) { 96 return auditRepo.findByToken(token).get(); 97 } else { 98 throw new ResponseStatusException(NOT_FOUND, \u0026#34;token not found!\u0026#34;); 99 } 100 } 101 102 private void pushAvailableItem(QItem qItem) { 103 template.convertAndSend(ITEM_QUEUE, qItem); 104 } 105} 1package com.demo.project90.queue; 2 3import static com.demo.project90.config.Constant.ITEM_ADDED_TO_CART_MSG; 4import static com.demo.project90.config.Constant.ITEM_ALREADY_IN_CART_MSG; 5import static com.demo.project90.config.Constant.ITEM_MISMATCH_MSG; 6import static com.demo.project90.config.Constant.ITEM_QUEUE; 7import static com.demo.project90.config.Constant.ITEM_SALE_NOT_STARTED_MSG; 8import static com.demo.project90.config.Constant.ITEM_SOLD_OUT_MSG; 9import static com.demo.project90.config.Constant.ITEM_TYPE; 10import static com.demo.project90.config.Constant.TOKEN_QUEUE; 11 12import java.time.LocalDateTime; 13 14import com.demo.project90.domain.Item; 15import com.demo.project90.model.QEvent; 16import com.demo.project90.model.QItem; 17import com.demo.project90.repo.AuditRepository; 18import com.demo.project90.repo.ItemRepository; 19import com.demo.project90.service.AuditService; 20import com.fasterxml.jackson.databind.ObjectMapper; 21import com.rabbitmq.client.Channel; 22import com.rabbitmq.client.GetResponse; 23import lombok.RequiredArgsConstructor; 24import lombok.SneakyThrows; 25import lombok.extern.slf4j.Slf4j; 26import org.springframework.amqp.rabbit.annotation.RabbitListener; 27import org.springframework.amqp.rabbit.connection.Connection; 28import org.springframework.amqp.rabbit.connection.ConnectionFactory; 29import org.springframework.stereotype.Component; 30 31@Component 32@Slf4j 33@RequiredArgsConstructor 34public class EventListener { 35 36 private final ItemRepository itemRepo; 37 private final AuditRepository auditRepo; 38 private final AuditService auditService; 39 private final 
ConnectionFactory connectionFactory; 40 private ObjectMapper objectMapper = new ObjectMapper(); 41 42 @SneakyThrows 43 @RabbitListener(queues = TOKEN_QUEUE) 44 public void processRequest(QEvent qEvent) { 45 log.info(\u0026#34;Received qEvent: {}\u0026#34;, qEvent); 46 if (!auditService.checkIfSaleStarted()) { 47 auditService.saveAudit(ITEM_SALE_NOT_STARTED_MSG, qEvent.getUser(), qEvent.getToken(), -1l, \u0026#34;FAIL\u0026#34;); 48 return; 49 } 50 //check if user already has item in cart. 51 if (itemRepo.countByCartOfAndType(qEvent.getUser(), ITEM_TYPE) == 0) { 52 //Find the first available item. 53 QItem qItem = popAvailableItem(); 54 if (qItem == null) { 55 //sold out. 56 auditService.saveAudit(ITEM_SOLD_OUT_MSG, qEvent.getUser(), qEvent.getToken(), -1l, \u0026#34;FAIL\u0026#34;); 57 return; 58 } 59 Item item = itemRepo.findByIdAndCartOfIsNull(qItem.getItemId()); 60 if (item != null) { 61 //add to cart of user. 62 item.setCartOf(qEvent.getUser()); 63 item.setAddedOn(LocalDateTime.now()); 64 itemRepo.save(item); 65 auditService.saveAudit(String.format(ITEM_ADDED_TO_CART_MSG, item.getName()), qEvent.getUser(), qEvent.getToken(), item.getId(), \u0026#34;SUCCESS\u0026#34;); 66 } else { 67 auditService.saveAudit(ITEM_MISMATCH_MSG, qEvent.getUser(), qEvent.getToken(), -1l, \u0026#34;FAIL\u0026#34;); 68 } 69 } else { 70 //sold out. 
71 auditService.saveAudit(ITEM_ALREADY_IN_CART_MSG, qEvent.getUser(), qEvent.getToken(), -1l, \u0026#34;FAIL\u0026#34;); 72 } 73 } 74 75 @SneakyThrows 76 private QItem popAvailableItem() { 77 try (Connection connection = connectionFactory.createConnection()) { 78 Channel channel = connection.createChannel(true); 79 GetResponse resp = channel.basicGet(ITEM_QUEUE, true); 80 if (resp != null) { 81 String message = new String(resp.getBody(), \u0026#34;UTF-8\u0026#34;); 82 return objectMapper.readValue(message, QItem.class); 83 } 84 return null; 85 } 86 } 87 88} 1package com.demo.project90.service; 2 3import static com.demo.project90.config.Constant.SALE_BEGINS_AFTER; 4 5import java.time.LocalDateTime; 6 7import com.demo.project90.domain.Audit; 8import com.demo.project90.repo.AuditRepository; 9import lombok.RequiredArgsConstructor; 10import lombok.extern.slf4j.Slf4j; 11import org.springframework.stereotype.Component; 12 13@Component 14@RequiredArgsConstructor 15@Slf4j 16public class AuditService { 17 private final AuditRepository auditRepo; 18 19 public void saveAudit(String message, String username, String token, Long itemId, String type) { 20 log.info(message); 21 //Note: Audit tables are always insert and no updates should happen. 
22 auditRepo.save(Audit.builder() 23 .username(username) 24 .itemId(itemId) 25 .message(message) 26 .token(token) 27 .logDate(LocalDateTime.now()) 28 .type(type) 29 .build()); 30 } 31 32 public boolean checkIfSaleStarted() { 33 if (LocalDateTime.now().isAfter(SALE_BEGINS_AFTER)) { 34 return true; 35 } else { 36 return false; 37 } 38 } 39} 1\u0026lt;div class=\u0026#34;content-container\u0026#34;\u0026gt; 2 \u0026lt;div class=\u0026#34;content-area\u0026#34;\u0026gt; 3 \u0026lt;div class=\u0026#34;clr-row\u0026#34;\u0026gt; 4 \u0026lt;div class=\u0026#34;clr-col-12\u0026#34;\u0026gt; 5 \u0026lt;div class=\u0026#34;alert-section\u0026#34;\u0026gt; 6 \u0026lt;app-alert\u0026gt;\u0026lt;/app-alert\u0026gt; 7 \u0026lt;/div\u0026gt; 8 9 \u0026lt;h2 style=\u0026#34;text-align: center\u0026#34;\u0026gt;Flash Sale: {{itemCount}} items available\u0026lt;/h2\u0026gt; 10 \u0026lt;div style=\u0026#34;text-align: center;\u0026#34;\u0026gt; 11 \u0026lt;img src=\u0026#34;assets/flashsale.png\u0026#34; width=\u0026#34;200\u0026#34; height=\u0026#34;200\u0026#34; style=\u0026#34;text-align: center;\u0026#34;\u0026gt; 12 \u0026lt;br/\u0026gt; 13 \u0026lt;clr-spinner [clrMedium]=\u0026#34;true\u0026#34; *ngIf=\u0026#34;spinner\u0026#34; id=\u0026#34;spinner\u0026#34;\u0026gt;\u0026lt;/clr-spinner\u0026gt; 14 \u0026lt;p *ngIf=\u0026#34;spinner\u0026#34;\u0026gt;\u0026lt;b style=\u0026#34;color:red;\u0026#34;\u0026gt;Dont Refresh\u0026lt;/b\u0026gt;\u0026lt;/p\u0026gt; 15 \u0026lt;button type=\u0026#34;submit\u0026#34; class=\u0026#34;btn btn-primary\u0026#34; (click)=\u0026#34;addToCart()\u0026#34; *ngIf=\u0026#34;showAddToCartButton\u0026#34; 16 id=\u0026#34;addToCart\u0026#34;\u0026gt; 17 Add to cart 18 \u0026lt;/button\u0026gt; 19 \u0026lt;/div\u0026gt; 20 \u0026lt;br/\u0026gt; 21 \u0026lt;br/\u0026gt; 22 \u0026lt;h2 style=\u0026#34;text-align: center\u0026#34;\u0026gt;My Cart\u0026lt;/h2\u0026gt; 23 \u0026lt;clr-datagrid\u0026gt; 24 
\u0026lt;clr-dg-column\u0026gt;Name\u0026lt;/clr-dg-column\u0026gt; 25 \u0026lt;clr-dg-column\u0026gt;Price\u0026lt;/clr-dg-column\u0026gt; 26 \u0026lt;clr-dg-column\u0026gt;Cart Of\u0026lt;/clr-dg-column\u0026gt; 27 \u0026lt;clr-dg-column\u0026gt;Added On\u0026lt;/clr-dg-column\u0026gt; 28 \u0026lt;clr-dg-column\u0026gt;Action\u0026lt;/clr-dg-column\u0026gt; 29 \u0026lt;!-- structural directive --\u0026gt; 30 \u0026lt;clr-dg-row clr-dg-row *clrDgItems=\u0026#34;let item of items\u0026#34;\u0026gt; 31 \u0026lt;clr-dg-placeholder class=\u0026#34;content-center\u0026#34;\u0026gt;No Items in Cart!\u0026lt;/clr-dg-placeholder\u0026gt; 32 \u0026lt;clr-dg-cell\u0026gt;{{item.name}}\u0026lt;/clr-dg-cell\u0026gt; 33 \u0026lt;clr-dg-cell\u0026gt;{{item.price}}\u0026lt;/clr-dg-cell\u0026gt; 34 \u0026lt;clr-dg-cell\u0026gt;{{item.cartOf}}\u0026lt;/clr-dg-cell\u0026gt; 35 \u0026lt;clr-dg-cell\u0026gt;{{item.addedOn}}\u0026lt;/clr-dg-cell\u0026gt; 36 \u0026lt;clr-dg-cell\u0026gt; 37 \u0026lt;cds-icon shape=\u0026#34;trash\u0026#34; style=\u0026#34;cursor: pointer; color: blue\u0026#34; (click)=\u0026#34;deleteCartFor(item.id)\u0026#34;\u0026gt; 38 \u0026lt;/cds-icon\u0026gt; 39 \u0026lt;/clr-dg-cell\u0026gt; 40 \u0026lt;/clr-dg-row\u0026gt; 41 42 \u0026lt;clr-dg-footer\u0026gt; 43 \u0026lt;clr-dg-pagination #pagination [clrDgPageSize]=\u0026#34;10\u0026#34;\u0026gt; 44 \u0026lt;clr-dg-page-size [clrPageSizeOptions]=\u0026#34;[10,20,50,100]\u0026#34;\u0026gt;Items per page\u0026lt;/clr-dg-page-size\u0026gt; 45 {{pagination.firstItem + 1}} - {{pagination.lastItem + 1}} of {{pagination.totalItems}} items 46 \u0026lt;/clr-dg-pagination\u0026gt; 47 \u0026lt;/clr-dg-footer\u0026gt; 48 \u0026lt;/clr-datagrid\u0026gt; 49 50 \u0026lt;/div\u0026gt; 51 \u0026lt;/div\u0026gt; 52 \u0026lt;/div\u0026gt; 53\u0026lt;/div\u0026gt; 54 1import {Component, OnInit, ViewChild} from \u0026#39;@angular/core\u0026#39;; 2import {RestService} from \u0026#39;../../services/rest.service\u0026#39;; 3import 
{Router} from \u0026#39;@angular/router\u0026#39;; 4import {AlertComponent} from \u0026#39;../alert/alert.component\u0026#39;; 5import {ClarityIcons, trashIcon} from \u0026#39;@cds/core/icon\u0026#39;; 6import {Item} from \u0026#34;../../models/item\u0026#34;; 7 8@Component({ 9 selector: \u0026#39;app-home\u0026#39;, 10 templateUrl: \u0026#39;./home.component.html\u0026#39;, 11 styleUrls: [] 12}) 13export class HomeComponent implements OnInit { 14 15 items: Item[] = []; 16 itemCount = 0; 17 // @ts-ignore 18 @ViewChild(AlertComponent, {static: true}) private alert: AlertComponent; 19 spinner = false; 20 showAddToCartButton = true; 21 token = \u0026#39;\u0026#39;; 22 23 constructor(private restService: RestService, private router: Router) { 24 ClarityIcons.addIcons(trashIcon); 25 } 26 27 ngOnInit(): void { 28 this.refresh(); 29 this.token = \u0026#39;\u0026#39;; 30 } 31 32 refresh(): void { 33 this.getCartItems(); 34 this.getItemCount(); 35 } 36 37 getCartItems(): void { 38 const username = sessionStorage.getItem(\u0026#39;user\u0026#39;); 39 this.restService.getCartItems(username).subscribe(data =\u0026gt; { 40 this.items = data; 41 if (this.items.length \u0026gt; 0) { 42 this.showAddToCartButton = false; 43 } 44 }); 45 } 46 47 getItemCount(): void { 48 this.restService.getFreeItemCount().subscribe(data =\u0026gt; { 49 this.itemCount = data; 50 if (this.itemCount === 0) { 51 this.showAddToCartButton = false; 52 } 53 }); 54 } 55 56 addToCart(): void { 57 const username = sessionStorage.getItem(\u0026#39;user\u0026#39;); 58 this.showAddToCartButton = false; 59 this.spinner = true; 60 this.restService.addCartItem(username) 61 .subscribe(data =\u0026gt; { 62 if (data) { 63 this.token = data.token; 64 this.alert.showSuccess(\u0026#39;In Queue!\u0026#39;); 65 } else { 66 this.alert.showError(\u0026#39;Failed to enter Queue!\u0026#39;); 67 } 68 this.checkIfComplete(); 69 }); 70 } 71 72 checkIfComplete(): void { 73 this.restService.getAuditToken(this.token) 74 
.subscribe(data =\u0026gt; { 75 if (data) { 76 if (data.type === \u0026#39;SUCCESS\u0026#39;) { 77 this.alert.showSuccess(data.message); 78 } else { 79 this.alert.showError(data.message); 80 } 81 this.refresh(); 82 this.spinner = false; 83 } 84 }, 85 error =\u0026gt; { 86 setTimeout( 87 () =\u0026gt; { 88 this.checkIfComplete(); 89 }, 90 5000 91 ); 92 }); 93 } 94 95 deleteCartFor(id: any): void { 96 const username = sessionStorage.getItem(\u0026#39;user\u0026#39;); 97 this.restService.deleteCartItem(username, id) 98 .subscribe(data =\u0026gt; { 99 if (data) { 100 this.items = []; 101 this.alert.showSuccess(\u0026#39;Deleted from cart!\u0026#39;); 102 } else { 103 this.alert.showError(\u0026#39;Failed to delete from cart!\u0026#39;); 104 } 105 this.refresh(); 106 }); 107 } 108 109} Setup 1# Project 90 2 3Flash Sale + RabbitMQ + Postgres + Jmeter 4 5[https://gitorko.github.io/flash-sale-system/](https://gitorko.github.io/flash-sale-system/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14 15node --version 16v16.16.0 17 18yarn --version 191.22.18 20``` 21 22### Postgres DB 23 24``` 25docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:9.6.10 26docker ps 27docker exec -it pg-container psql -U postgres -W postgres 28CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 29CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 30grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 31 32docker stop pg-container 33docker start pg-container 34``` 35 36### RabbitMQ 37 38``` 39docker run -d -p 5672:5672 -p 15672:15672 --name my-rabbit rabbitmq:3-management 40``` 41 42Open [http://localhost:15672/](http://localhost:15672/) 43 44```bash 45user: guest 46pwd: guest 47``` 48 49### Dev 50 51To run the backend in dev mode Postgres DB is needed to run the integration tests during build. 
52 53```bash 54./gradlew clean build 55./gradlew bootRun 56``` 57 58To Run UI in dev mode 59 60```bash 61cd ui 62yarn install 63yarn build 64yarn start 65``` 66 67Open [http://localhost:4200/](http://localhost:4200/) 68 69### Prod 70 71To run as a single jar, both UI and backend are bundled to single uber jar. 72 73```bash 74./gradlew cleanBuild 75cd build/libs 76java -jar project90-1.0.0.jar 77``` 78 79Open [http://localhost:8080/](http://localhost:8080/) 80 81### JMeter \u0026amp; Selenium 82 83To test for concurrent requests and load test the UI you can use JMeter with selenium plugin 84 85```bash 86brew install jmeter 87xattr -d com.apple.quarantine chromedriver 88``` 89 90Install the selenium plugin for JMeter 91 92[https://jmeter-plugins.org/](https://jmeter-plugins.org/) 93 94Download the chrome driver 95 96[https://chromedriver.chromium.org/downloads](https://chromedriver.chromium.org/downloads) 97 98### Docker 99 100```bash 101./gradlew cleanBuild 102docker build -f docker/Dockerfile --force-rm -t project90:1.0.0 . 103docker images |grep project90 104docker tag project90:1.0.0 gitorko/project90:1.0.0 105docker push gitorko/project90:1.0.0 106docker-compose -f docker/docker-compose.yml up 107``` Testing Click on start button to test multiple requests to add to cart. The resources of the system are\nTomcat server has default 200 worker threads. Each 'add to cart' request takes average 20 ms for the above resources. 200/0.02 = 10,000 requests can be handled per second. Reducing this by load factor due to GC and context switching of 0.8 (80%) gives us 10000 x 0.8 = 8000 requests per second.\nThis setup can be deployed on multi node scenario, as the tokens are fetched from RabbitMQ queue it will scale on a distributed setup. 
Further optimization can be done by having region dedicated queue sharding and region specific event processor.\nFor authenticated sessions DOS (Denial Of Service) attacks are not a concern, if you still need to ensure against DOS attacks you can use a Captcha.\nOnce you add more servers to handle the request the bottleneck shifts to RabbitMQ capability to handle load and we can then explore clustering in RabbitMQ. The queue can be made persistent so that events survive a restart.\nReferences https://clarity.design/\nhttps://spring.io/projects/spring-boot\nhttps://www.rabbitmq.com/\nhttps://hackernoon.com/developing-a-flash-sale-system-7481f6ede0a3\n","link":"https://gitorko.github.io/post/flash-sale-system/","section":"post","tags":["flash-sale","rabbitmq","clarity","jmeter","selenium"],"title":"Flash Sale System"},{"body":"","link":"https://gitorko.github.io/tags/flash-sale/","section":"tags","tags":null,"title":"Flash-Sale"},{"body":"","link":"https://gitorko.github.io/categories/jmeter/","section":"categories","tags":null,"title":"JMeter"},{"body":"","link":"https://gitorko.github.io/tags/rabbitmq/","section":"tags","tags":null,"title":"Rabbitmq"},{"body":"","link":"https://gitorko.github.io/tags/selenium/","section":"tags","tags":null,"title":"Selenium"},{"body":"","link":"https://gitorko.github.io/categories/selenium/","section":"categories","tags":null,"title":"Selenium"},{"body":"","link":"https://gitorko.github.io/categories/springboot/","section":"categories","tags":null,"title":"SpringBoot"},{"body":"Clarity provides Server-Driven DataGrid. 
Using Query DSL we will fetch page by page data and render it in clarity server-driven data grid\nGithub: https://github.com/gitorko/project86\nQuick Overview To deploy the application in a single command, clone the project, make sure no conflicting docker containers or ports are running and then run\n1git clone https://github.com/gitorko/project86 2cd project86 3docker-compose -f docker/docker-compose.yml up Open http://localhost:8080/\nServer-Driven DataGrid When dealing with large amounts of data or heavy processing, a DataGrid often has to access the currently displayed data only, requesting only the necessary pieces of data from the server.\nImplementation Design Code 1package com.demo.project86.controller; 2 3import com.demo.project86.domain.Customer; 4import com.demo.project86.domain.CustomerBinderCustomizer; 5import com.demo.project86.repo.CustomerRepository; 6import lombok.RequiredArgsConstructor; 7import lombok.extern.slf4j.Slf4j; 8import org.springframework.beans.factory.annotation.Autowired; 9import org.springframework.data.domain.Page; 10import org.springframework.data.domain.Pageable; 11import org.springframework.data.querydsl.binding.QuerydslPredicate; 12import org.springframework.data.web.PageableDefault; 13import org.springframework.web.bind.annotation.DeleteMapping; 14import org.springframework.web.bind.annotation.GetMapping; 15import org.springframework.web.bind.annotation.PathVariable; 16import org.springframework.web.bind.annotation.PostMapping; 17import org.springframework.web.bind.annotation.RequestBody; 18import org.springframework.web.bind.annotation.RestController; 19 20@RestController 21@Slf4j 22@RequiredArgsConstructor 23public class HomeController { 24 25 @Autowired 26 CustomerRepository customerRepo; 27 28 @GetMapping(value = \u0026#34;/api/customer\u0026#34;) 29 public Page\u0026lt;Customer\u0026gt; getCustomers(@PageableDefault(size = 20) Pageable pageRequest, 30 @QuerydslPredicate(root = Customer.class, bindings = 
CustomerBinderCustomizer.class) com.querydsl.core.types.Predicate predicate) { 31 return customerRepo.findAll(predicate, pageRequest); 32 } 33 34 @PostMapping(value = \u0026#34;/api/customer\u0026#34;) 35 public Customer saveCustomer(@RequestBody Customer customer) { 36 log.info(\u0026#34;Saving customer!\u0026#34;); 37 return customerRepo.save(customer); 38 } 39 40 @DeleteMapping(value = \u0026#34;/api/customer/{id}\u0026#34;) 41 public void deleteCustomer(@PathVariable Long id) { 42 log.info(\u0026#34;Deleting customer: {}\u0026#34;, id); 43 customerRepo.deleteById(id); 44 } 45 46} 1package com.demo.project86.domain; 2 3import java.io.Serializable; 4import jakarta.persistence.Basic; 5import jakarta.persistence.Column; 6import jakarta.persistence.Entity; 7import jakarta.persistence.GeneratedValue; 8import jakarta.persistence.GenerationType; 9import jakarta.persistence.Id; 10import jakarta.persistence.Table; 11import jakarta.validation.constraints.Size; 12 13import lombok.AllArgsConstructor; 14import lombok.Builder; 15import lombok.Data; 16import lombok.NoArgsConstructor; 17 18@Entity 19@Table(name = \u0026#34;customer\u0026#34;) 20@Data 21@Builder 22@AllArgsConstructor 23@NoArgsConstructor 24public class Customer implements Serializable { 25 26 private static final long serialVersionUID = 1L; 27 @Id 28 @GeneratedValue(strategy = GenerationType.AUTO) 29 @Basic(optional = false) 30 @Column(name = \u0026#34;id\u0026#34;) 31 private Long id; 32 @Size(max = 45) 33 @Column(name = \u0026#34;first_name\u0026#34;) 34 private String firstName; 35 @Size(max = 45) 36 @Column(name = \u0026#34;last_name\u0026#34;) 37 private String lastName; 38 private String city; 39 40} 1package com.demo.project86.domain; 2 3import java.util.Collection; 4import java.util.Optional; 5 6import com.querydsl.core.BooleanBuilder; 7import com.querydsl.core.types.Predicate; 8import com.querydsl.core.types.dsl.StringPath; 9import org.springframework.data.querydsl.binding.MultiValueBinding; 10import 
org.springframework.data.querydsl.binding.QuerydslBinderCustomizer; 11import org.springframework.data.querydsl.binding.QuerydslBindings; 12 13public class CustomerBinderCustomizer implements QuerydslBinderCustomizer\u0026lt;QCustomer\u0026gt; { 14 15 @Override 16 public void customize(QuerydslBindings querydslBindings, QCustomer qCustomer) { 17 querydslBindings.including( 18 qCustomer.id, 19 qCustomer.firstName, 20 qCustomer.lastName, 21 qCustomer.city 22 ); 23 24 StringPath[] multiPropertySearchPaths = new StringPath[]{qCustomer.firstName, qCustomer.lastName, qCustomer.city}; 25 26 querydslBindings.bind(multiPropertySearchPaths).all(new MultiValueBinding\u0026lt;\u0026gt;() { 27 @Override 28 public Optional\u0026lt;Predicate\u0026gt; bind(StringPath path, Collection\u0026lt;? extends String\u0026gt; values) { 29 BooleanBuilder predicate = new BooleanBuilder(); 30 values.forEach(value -\u0026gt; predicate.or(path.containsIgnoreCase(value))); 31 return Optional.of(predicate); 32 } 33 }); 34 35 } 36} 1package com.demo.project86.repo; 2 3import com.demo.project86.domain.Customer; 4import org.springframework.data.jpa.repository.JpaRepository; 5import org.springframework.data.querydsl.QuerydslPredicateExecutor; 6 7public interface CustomerRepository extends JpaRepository\u0026lt;Customer, Long\u0026gt;, QuerydslPredicateExecutor\u0026lt;Customer\u0026gt; { 8} 1\u0026lt;div class=\u0026#34;content-container\u0026#34;\u0026gt; 2 \u0026lt;div class=\u0026#34;content-area\u0026#34;\u0026gt; 3 \u0026lt;div class=\u0026#34;alert-section\u0026#34;\u0026gt; 4 \u0026lt;app-alert\u0026gt;\u0026lt;/app-alert\u0026gt; 5 \u0026lt;/div\u0026gt; 6 7 \u0026lt;div class=\u0026#34;clr-row\u0026#34;\u0026gt; 8 \u0026lt;div class=\u0026#34;clr-col-12\u0026#34;\u0026gt; 9 \u0026lt;h2 style=\u0026#34;text-align: center\u0026#34;\u0026gt;Customers\u0026lt;/h2\u0026gt; 10 \u0026lt;clr-datagrid [clrDgLoading]=\u0026#34;loading\u0026#34; 
(clrDgRefresh)=\u0026#34;refresh($event)\u0026#34;\u0026gt; 11 \u0026lt;clr-dg-placeholder class=\u0026#34;content-center\u0026#34;\u0026gt;No Customers!\u0026lt;/clr-dg-placeholder\u0026gt; 12 \u0026lt;clr-dg-column [clrDgField]=\u0026#34;\u0026#39;id\u0026#39;\u0026#34;\u0026gt;ID\u0026lt;/clr-dg-column\u0026gt; 13 \u0026lt;clr-dg-column [clrDgField]=\u0026#34;\u0026#39;firstName\u0026#39;\u0026#34;\u0026gt; 14 \u0026lt;ng-container *clrDgHideableColumn=\u0026#34;{hidden: false}\u0026#34;\u0026gt;First Name\u0026lt;/ng-container\u0026gt; 15 \u0026lt;/clr-dg-column\u0026gt; 16 \u0026lt;clr-dg-column [clrDgField]=\u0026#34;\u0026#39;lastName\u0026#39;\u0026#34;\u0026gt; 17 \u0026lt;ng-container *clrDgHideableColumn=\u0026#34;{hidden: false}\u0026#34;\u0026gt;Last Name\u0026lt;/ng-container\u0026gt; 18 \u0026lt;/clr-dg-column\u0026gt; 19 \u0026lt;clr-dg-column [clrDgField]=\u0026#34;\u0026#39;city\u0026#39;\u0026#34;\u0026gt; 20 \u0026lt;ng-container *clrDgHideableColumn=\u0026#34;{hidden: false}\u0026#34;\u0026gt;City\u0026lt;/ng-container\u0026gt; 21 \u0026lt;clr-dg-filter [clrDgFilter]=\u0026#34;cityFilter\u0026#34;\u0026gt; 22 \u0026lt;app-checkbox-filter 23 #cityFilter 24 [filterValues]=\u0026#34;cityFilterValues\u0026#34; 25 filterKey=\u0026#34;city\u0026#34;\u0026gt;\u0026lt;/app-checkbox-filter\u0026gt; 26 \u0026lt;/clr-dg-filter\u0026gt; 27 \u0026lt;/clr-dg-column\u0026gt; 28 \u0026lt;clr-dg-column\u0026gt;Action\u0026lt;/clr-dg-column\u0026gt; 29 \u0026lt;!-- structural directive --\u0026gt; 30 \u0026lt;clr-dg-row *ngFor=\u0026#34;let customer of customerPage?.content\u0026#34;\u0026gt; 31 \u0026lt;clr-dg-cell\u0026gt;{{customer.id}}\u0026lt;/clr-dg-cell\u0026gt; 32 \u0026lt;clr-dg-cell\u0026gt;{{customer.firstName}}\u0026lt;/clr-dg-cell\u0026gt; 33 \u0026lt;clr-dg-cell\u0026gt;{{customer.lastName}}\u0026lt;/clr-dg-cell\u0026gt; 34 \u0026lt;clr-dg-cell\u0026gt;{{customer.city}}\u0026lt;/clr-dg-cell\u0026gt; 35 \u0026lt;clr-dg-cell\u0026gt; 36 
\u0026lt;cds-icon shape=\u0026#34;trash\u0026#34; style=\u0026#34;cursor: pointer; color: blue\u0026#34; (click)=\u0026#34;deleteCustomer(customer)\u0026#34;\u0026gt; 37 \u0026lt;/cds-icon\u0026gt; 38 \u0026lt;/clr-dg-cell\u0026gt; 39 \u0026lt;/clr-dg-row\u0026gt; 40 41 \u0026lt;clr-dg-footer\u0026gt; 42 \u0026lt;clr-dg-pagination #pagination [clrDgPageSize]=\u0026#34;10\u0026#34; [(clrDgPage)]=\u0026#34;page\u0026#34; 43 [clrDgTotalItems]=\u0026#34;total\u0026#34;\u0026gt; 44 \u0026lt;clr-dg-page-size [clrPageSizeOptions]=\u0026#34;[10,20,50,100]\u0026#34;\u0026gt;Customers per page\u0026lt;/clr-dg-page-size\u0026gt; 45 {{pagination.firstItem + 1}} - {{pagination.lastItem + 1}} of {{pagination.totalItems}} customers 46 \u0026lt;/clr-dg-pagination\u0026gt; 47 \u0026lt;/clr-dg-footer\u0026gt; 48 49 \u0026lt;/clr-datagrid\u0026gt; 50 \u0026lt;/div\u0026gt; 51 \u0026lt;/div\u0026gt; 52 53 \u0026lt;div class=\u0026#34;clr-row\u0026#34;\u0026gt; 54 \u0026lt;div class=\u0026#34;clr-col-12\u0026#34;\u0026gt; 55 \u0026lt;form class=\u0026#34;clr-form clr-form-horizontal\u0026#34;\u0026gt; 56 \u0026lt;div class=\u0026#34;clr-form-control\u0026#34;\u0026gt; 57 \u0026lt;label for=\u0026#34;firstName\u0026#34; class=\u0026#34;clr-control-label\u0026#34;\u0026gt;First Name\u0026lt;/label\u0026gt; 58 \u0026lt;div class=\u0026#34;clr-control-container\u0026#34;\u0026gt; 59 \u0026lt;div class=\u0026#34;clr-input-wrapper\u0026#34;\u0026gt; 60 \u0026lt;!-- two way data binding --\u0026gt; 61 \u0026lt;input type=\u0026#34;text\u0026#34; [(ngModel)]=\u0026#34;customer.firstName\u0026#34; id=\u0026#34;firstName\u0026#34; name=\u0026#34;firstName\u0026#34; 62 placeholder=\u0026#34;Placeholder\u0026#34; class=\u0026#34;clr-input\u0026#34;/\u0026gt; 63 \u0026lt;/div\u0026gt; 64 \u0026lt;/div\u0026gt; 65 \u0026lt;/div\u0026gt; 66 \u0026lt;div class=\u0026#34;clr-form-control\u0026#34;\u0026gt; 67 \u0026lt;label for=\u0026#34;lastName\u0026#34; 
class=\u0026#34;clr-control-label\u0026#34;\u0026gt;Last Name\u0026lt;/label\u0026gt; 68 \u0026lt;div class=\u0026#34;clr-control-container\u0026#34;\u0026gt; 69 \u0026lt;div class=\u0026#34;clr-input-wrapper\u0026#34;\u0026gt; 70 \u0026lt;input [(ngModel)]=\u0026#34;customer.lastName\u0026#34; type=\u0026#34;text\u0026#34; id=\u0026#34;lastName\u0026#34; name=\u0026#34;lastName\u0026#34; 71 placeholder=\u0026#34;Placeholder\u0026#34; class=\u0026#34;clr-input\u0026#34;/\u0026gt; 72 \u0026lt;/div\u0026gt; 73 \u0026lt;/div\u0026gt; 74 \u0026lt;/div\u0026gt; 75 \u0026lt;div class=\u0026#34;clr-form-control\u0026#34;\u0026gt; 76 \u0026lt;div class=\u0026#34;clr-control-container\u0026#34;\u0026gt; 77 \u0026lt;!-- event binding --\u0026gt; 78 \u0026lt;button type=\u0026#34;submit\u0026#34; class=\u0026#34;btn btn-primary\u0026#34; (click)=\u0026#34;saveCustomer()\u0026#34;\u0026gt;Save\u0026lt;/button\u0026gt; 79 \u0026lt;/div\u0026gt; 80 \u0026lt;/div\u0026gt; 81 \u0026lt;/form\u0026gt; 82 \u0026lt;/div\u0026gt; 83 \u0026lt;/div\u0026gt; 84 85 \u0026lt;/div\u0026gt; 86\u0026lt;/div\u0026gt; The debounceTime added to debounce the events so that rest api doesn't get called for every keystroke.\n1import {Component, OnInit, ViewChild} from \u0026#39;@angular/core\u0026#39;; 2import {Customer} from \u0026#39;../../models/customer\u0026#39;; 3import {RestService} from \u0026#39;../../services/rest.service\u0026#39;; 4import {Router} from \u0026#39;@angular/router\u0026#39;; 5import {ClarityIcons, trashIcon} from \u0026#39;@cds/core/icon\u0026#39;; 6import {ClrDatagridStateInterface} from \u0026#39;@clr/angular\u0026#39;; 7import {CustomerPage} from \u0026#34;../../models/customer-page\u0026#34;; 8import {AlertComponent} from \u0026#34;../alert/alert.component\u0026#34;; 9import {Subject} from \u0026#34;rxjs\u0026#34;; 10import {debounceTime} from \u0026#34;rxjs/operators\u0026#34;; 11 12@Component({ 13 selector: \u0026#39;app-home\u0026#39;, 14 templateUrl: 
\u0026#39;./home.component.html\u0026#39;, 15 styleUrls: [] 16}) 17export class HomeComponent implements OnInit { 18 19 customerPage: CustomerPage = new CustomerPage(); 20 customer: Customer = new Customer(); 21 loading = false; 22 page: number = 1; 23 total: number = 1; 24 cityFilterValues: string[] = []; 25 tableState: ClrDatagridStateInterface = {page: {current: 1, from: 1, size: 10, to: 10}}; 26 debouncer: Subject\u0026lt;any\u0026gt; = new Subject\u0026lt;any\u0026gt;(); 27 28 // @ts-ignore 29 @ViewChild(AlertComponent, {static: true}) private alert: AlertComponent; 30 31 constructor(private restService: RestService, private router: Router) { 32 ClarityIcons.addIcons(trashIcon); 33 this.cityFilterValues.push(\u0026#34;Bangalore\u0026#34;); 34 this.cityFilterValues.push(\u0026#34;New York\u0026#34;); 35 this.cityFilterValues.push(\u0026#34;London\u0026#34;); 36 } 37 38 ngOnInit(): void { 39 this.loading = true; 40 this.debouncer 41 .pipe(debounceTime(700)) 42 .subscribe(state =\u0026gt; { 43 this.tableState = state; 44 this.loading = true; 45 if (!state.page) { 46 state.page = { 47 from: 1, 48 to: 10, 49 size: 10, 50 }; 51 } 52 // @ts-ignore 53 let pageStart = state.page.current - 1; 54 let pageSize = state.page.size; 55 this.restService.getCustomers(pageStart, pageSize, state.filters, state.sort).subscribe(data =\u0026gt; { 56 this.customerPage = data; 57 this.total = this.customerPage?.totalElements; 58 this.loading = false; 59 }, 60 error =\u0026gt; { 61 this.loading = false; 62 }); 63 } 64 ); 65 } 66 67 saveCustomer(): void { 68 console.log(\u0026#39;save customer!\u0026#39;); 69 this.restService.saveCustomer(this.customer) 70 .subscribe(data =\u0026gt; { 71 this.alert.showSuccess(\u0026#39;Saved customer: \u0026#39; + this.customer.firstName); 72 this.refresh(this.tableState); 73 }); 74 } 75 76 deleteCustomer(customer: Customer): void { 77 console.log(\u0026#39;deleting customer : \u0026#39; + customer.id); 78 this.restService.deleteCustomer(customer.id) 
79 .subscribe(data =\u0026gt; { 80 this.alert.showSuccess(\u0026#39;Deleted customer: \u0026#39; + customer.id); 81 this.refresh(this.tableState); 82 }); 83 } 84 85 refresh(state: ClrDatagridStateInterface) { 86 this.debouncer.next(state); 87 } 88 89} Setup 1# Project 86 2 3Clarity - Server Driven Data Grid with QueryDSL 4 5[https://gitorko.github.io/clarity-server-driven-datagrid/](https://gitorko.github.io/clarity-server-driven-datagrid/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14 15node --version 16v16.16.0 17 18yarn --version 191.22.18 20``` 21 22### Postgres DB 23 24``` 25docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:9.6.10 26docker ps 27docker exec -it pg-container psql -U postgres -W postgres 28CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 29CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 30grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 31 32docker stop pg-container 33docker start pg-container 34``` 35 36### Dev 37 38To Run backend in dev mode 39 40```bash 41cd project86 42./gradlew bootRun 43``` 44 45To Run UI in dev mode 46 47```bash 48cd ui 49yarn install 50yarn build 51yarn start 52``` 53 54Open [http://localhost:4200/](http://localhost:4200/) 55 56### Prod 57 58To run as a single jar, both UI and backend are bundled to single uber jar. 59 60```bash 61./gradlew cleanBuild 62cd project86/build/libs 63java -jar project86-1.0.0.jar 64``` 65 66Open [http://localhost:8080/](http://localhost:8080/) 67 68### Docker 69 70```bash 71./gradlew cleanBuild 72docker build -f docker/Dockerfile --force-rm -t project86:1.0.0 . 
73docker images |grep project86 74docker tag project86:1.0.0 gitorko/project86:1.0.0 75docker push gitorko/project86:1.0.0 76docker-compose -f docker/docker-compose.yml up 77``` References https://clarity.design/\nhttps://clarity.design/angular-components/datagrid/#server-driven-datagrid\n","link":"https://gitorko.github.io/post/clarity-server-driven-datagrid/","section":"post","tags":["server-driven","clarity","datagrid","querydsl"],"title":"Clarity Server-Driven DataGrid"},{"body":"","link":"https://gitorko.github.io/tags/datagrid/","section":"tags","tags":null,"title":"Datagrid"},{"body":"","link":"https://gitorko.github.io/categories/optimistic-locking/","section":"categories","tags":null,"title":"Optimistic-Locking"},{"body":"","link":"https://gitorko.github.io/tags/querydsl/","section":"tags","tags":null,"title":"Querydsl"},{"body":"","link":"https://gitorko.github.io/categories/querydsl/","section":"categories","tags":null,"title":"QueryDSL"},{"body":"","link":"https://gitorko.github.io/tags/server-driven/","section":"tags","tags":null,"title":"Server-Driven"},{"body":"A Ticket Booking system developed with Spring Boot, Spring JPA, Redis and Angular (Clarity) frontend.\nGithub: https://github.com/gitorko/project87\nQuick Overview To deploy the application in a single command, clone the project, make sure no conflicting docker containers or ports are running and then run\n1git clone https://github.com/gitorko/project87 2cd project87 3docker-compose -f docker/docker-compose.yml up Open http://localhost:8080/\nFeatures A ticket booking application that support concurrent ticket booking for multiple users along with automatic unlock of blocked tickets. Provide QR code ticket and completes the ticketing flow on admit.\nFunctional Requirements A ticket booking system where users can book tickets. Two users cant book the same ticket. Authentication can be simulated to randomly assign a user to each browser instance. Each browser session considered as a unique user. 
Logout should assign a new user to the session. User should be able to block a ticket before making payment. Other user should not be able to block the same ticket. If the user doesn't complete the payment in 30 seconds the ticket which is blocked should be released back to the free pool. After blocking a ticket user can cancel the ticket, this should release the ticket back to the free pool. If the user tries to confirm the ticket after blocking wait for 30 seconds the booking should fail. Same user should be able to book the same ticket twice from two different browser sessions. Only user who has blocked the ticket can confirm the ticket. If user is looking at stale data, the ticket is already booked by other user then the transaction should fail. Should generate QR code as ticket Scanning the QR code should indicate that user is admitted into the venue. A single booking can book N tickets. Non-Functional Requirements Latency should be low. System should be highly available. System should scale well when number of users increases. We will use a fixed rate scheduler to release any tickets held for more than 30 seconds. Implementation Design We will use postgres DB to persist the booking data. We will use optimistic locking as it scales well without locking the db rows. Two users can try to book the same ticket at the same time. It uses optimistic locking to RESERVE a ticket for one user and throws ObjectOptimisticLockingFailureException for the other user.\nWhile the first user is waiting to confirm, if the second user tries to book the same ticket it fails.\nIf the first user doesn't complete the payment confirmation within 30 seconds the lock on the ticket is released. 
If the first user presses cancel button then also the lock on the RESERVED ticket is released.\nIf the first user tries to confirm the ticket after 30 seconds then the booking fails as ticket is held in RESERVED state for a user only for 30 seconds.\nIf the same user tries to book the same seat from 2 different windows, one will succeed while other will throw error\nBackend api ensure that only user who RESERVED the ticket can book the ticket. So a ticket RESERVED by first user cant be booked by second user.\nIf the second user hasn't refreshed his screen and tries to book already BOOKED tickets it will fail\nQR code is generated for each ticket, clicking on the ticket takes you to the QR code.\nCan also be fetched via postman\nOn scanning the QR code in your mobile and visiting the uri provided the state is marked as entered completing the ticketing flow.\nEntered indicates that user has been admitted to the event on showing the QR code ticket. You can now track who booked the ticket and if they visited the event using the QR code ticket.\nCode 1package com.demo.project87.controller; 2 3import java.io.ByteArrayOutputStream; 4import java.io.IOException; 5import java.net.InetAddress; 6import java.time.LocalDateTime; 7import java.util.HashSet; 8import java.util.Set; 9import java.util.UUID; 10import jakarta.transaction.Transactional; 11 12import com.demo.project87.domain.BookingRequest; 13import com.demo.project87.domain.Ticket; 14import com.demo.project87.repository.TicketRepository; 15import com.google.zxing.BarcodeFormat; 16import com.google.zxing.WriterException; 17import com.google.zxing.client.j2se.MatrixToImageWriter; 18import com.google.zxing.common.BitMatrix; 19import com.google.zxing.qrcode.QRCodeWriter; 20import lombok.RequiredArgsConstructor; 21import lombok.extern.slf4j.Slf4j; 22import org.springframework.beans.factory.annotation.Autowired; 23import org.springframework.http.MediaType; 24import 
org.springframework.orm.ObjectOptimisticLockingFailureException; 25import org.springframework.scheduling.annotation.Scheduled; 26import org.springframework.web.bind.annotation.GetMapping; 27import org.springframework.web.bind.annotation.PathVariable; 28import org.springframework.web.bind.annotation.PostMapping; 29import org.springframework.web.bind.annotation.RequestBody; 30import org.springframework.web.bind.annotation.ResponseBody; 31import org.springframework.web.bind.annotation.RestController; 32 33@RestController 34@Slf4j 35@RequiredArgsConstructor 36public class HomeController { 37 38 private static final Integer EXPIRY_TTL_SECS = 30; 39 40 @Autowired 41 TicketRepository ticketRepo; 42 43 @GetMapping(value = \u0026#34;/api/user\u0026#34;) 44 public String getUser() { 45 return UUID.randomUUID().toString().substring(0, 7); 46 } 47 48 @GetMapping(value = \u0026#34;/api/tickets\u0026#34;) 49 public Iterable\u0026lt;Ticket\u0026gt; getTickets() { 50 return ticketRepo.findAllByOrderByIdAsc(); 51 } 52 53 @PostMapping(value = \u0026#34;/api/ticket\u0026#34;) 54 public Boolean bookTicket(@RequestBody BookingRequest bookingRequest) { 55 log.info(\u0026#34;Confirming Booking! {}\u0026#34;, bookingRequest); 56 return confirmBooking(bookingRequest); 57 } 58 59 @PostMapping(value = \u0026#34;/api/hold\u0026#34;) 60 public Boolean holdBooking(@RequestBody BookingRequest bookingRequest) { 61 log.info(\u0026#34;Holding booking tickets! {}\u0026#34;, bookingRequest); 62 return bookingHoldCall(bookingRequest, true); 63 } 64 65 @PostMapping(value = \u0026#34;/api/cancel\u0026#34;) 66 public Boolean cancelBooking(@RequestBody BookingRequest bookingRequest) { 67 log.info(\u0026#34;Cancelling booking tickets! 
{}\u0026#34;, bookingRequest); 68 return bookingHoldCall(bookingRequest, false); 69 } 70 71 @GetMapping(value = \u0026#34;/api/admit/{entryToken}\u0026#34;) 72 public String admit(@PathVariable String entryToken) { 73 Ticket ticket = ticketRepo.findByEntryTokenIs(entryToken); 74 if (ticketRepo.findByEntryTokenIs(entryToken) != null) { 75 ticket.setEntered(true); 76 ticketRepo.save(ticket); 77 return \u0026#34;ADMIT\u0026#34;; 78 } else { 79 return \u0026#34;INVALID\u0026#34;; 80 } 81 } 82 83 @GetMapping(value = \u0026#34;/api/qrcode/{entryToken}\u0026#34;, produces = MediaType.IMAGE_JPEG_VALUE) 84 public @ResponseBody byte[] getQRCode(@PathVariable String entryToken) { 85 Ticket ticket = ticketRepo.findByEntryTokenIs(entryToken); 86 if (ticketRepo.findByEntryTokenIs(entryToken) != null) { 87 return ticket.getQrCode(); 88 } else { 89 return null; 90 } 91 } 92 93 @Transactional 94 public Boolean confirmBooking(BookingRequest bookingRequest) { 95 try { 96 Iterable\u0026lt;Ticket\u0026gt; ticketSet = ticketRepo.findAllById(bookingRequest.getTicketIds()); 97 Set\u0026lt;Ticket\u0026gt; tickets = new HashSet\u0026lt;\u0026gt;(); 98 for (Ticket ticket : ticketSet) { 99 tickets.add(ticket); 100 //Only person who held the lock can complete the booking. 101 if (ticket.getLockedBy().equals(bookingRequest.getUser())) { 102 ticket.setLockedBy(\u0026#34;\u0026#34;); 103 ticket.setBooked(true); 104 ticket.setBookedBy(bookingRequest.getUser()); 105 106 //Create the QR code for the ticket and store to DB. 
107 String entryToken = UUID.randomUUID().toString(); 108 ticket.setEntryToken(entryToken); 109 String hostName = InetAddress.getLocalHost().getHostAddress(); 110 String entryUri = \u0026#34;http://\u0026#34; + hostName + \u0026#34;:8080/api/admit/\u0026#34; + entryToken; 111 QRCodeWriter qrCodeWriter = new QRCodeWriter(); 112 BitMatrix bitMatrix = qrCodeWriter.encode(entryUri, BarcodeFormat.QR_CODE, 200, 200); 113 try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) { 114 MatrixToImageWriter.writeToStream(bitMatrix, \u0026#34;PNG\u0026#34;, baos); 115 byte[] png = baos.toByteArray(); 116 ticket.setQrCode(png); 117 } 118 119 } else { 120 log.info(\u0026#34;Ticket: {} lock is held by other user!\u0026#34;, ticket); 121 return false; 122 } 123 } 124 ticketRepo.saveAll(tickets); 125 return true; 126 } catch (ObjectOptimisticLockingFailureException ex) { 127 log.error(\u0026#34;Booking confirmation failed due to lock, {}\u0026#34;, ex.getMessage()); 128 return false; 129 } catch (WriterException | IOException ex) { 130 log.error(\u0026#34;Failed to generate QR code, {}\u0026#34;, ex.getMessage()); 131 return false; 132 } catch (Exception ex) { 133 log.error(\u0026#34;Booking confirmation failed, {}\u0026#34;, ex.getMessage()); 134 return false; 135 } 136 137 } 138 139 @Transactional 140 public Boolean bookingHoldCall(BookingRequest bookingRequest, Boolean start) { 141 try { 142 Iterable\u0026lt;Ticket\u0026gt; ticketSet = ticketRepo.findAllById(bookingRequest.getTicketIds()); 143 Set\u0026lt;Ticket\u0026gt; tickets = new HashSet\u0026lt;\u0026gt;(); 144 for (Ticket ticket : ticketSet) { 145 tickets.add(ticket); 146 //Reserve the ticket till the time payment is done. 147 if (start) { 148 if (ticket.getBooked()) { 149 log.info(\u0026#34;Ticket: {} is already booked!\u0026#34;, ticket); 150 return false; 151 } 152 //Only if ticket is free it can be booked. 
153 if (ticket.getLockedBy().equals(\u0026#34;\u0026#34;)) { 154 ticket.setLockedBy(bookingRequest.getUser()); 155 //TTL to release lock after 30 seconds. 156 ticket.setLockExpiry(LocalDateTime.now().plusSeconds(EXPIRY_TTL_SECS)); 157 log.info(\u0026#34;Ticket: {} is reserved!\u0026#34;, ticket); 158 } else { 159 log.info(\u0026#34;Ticket: {} is already locked by other user!\u0026#34;, ticket); 160 return false; 161 } 162 } else { 163 //Only person who held the lock can release it. 164 if (ticket.getLockedBy().equals(bookingRequest.getUser())) { 165 ticket.setLockedBy(\u0026#34;\u0026#34;); 166 log.info(\u0026#34;Ticket: {} is released!\u0026#34;, ticket); 167 } else { 168 log.info(\u0026#34;Ticket: {} is already locked by other user!\u0026#34;, ticket); 169 return false; 170 } 171 } 172 } 173 ticketRepo.saveAll(tickets); 174 return true; 175 } catch (ObjectOptimisticLockingFailureException ex) { 176 log.error(\u0026#34;Error reserving flow: {}\u0026#34;, ex.getMessage()); 177 return false; 178 } 179 180 } 181 182 //Runs every 1 min. 
183 @Scheduled(fixedRate = 60000) 184 public void scheduleFixedRateTask() { 185 log.info(\u0026#34;Running lock cleanup job!\u0026#34;); 186 Iterable\u0026lt;Ticket\u0026gt; ticketSet = ticketRepo.findAllByLockExpiryIsNotNull(); 187 Set\u0026lt;Ticket\u0026gt; tickets = new HashSet\u0026lt;\u0026gt;(); 188 ticketSet.forEach(t -\u0026gt; { 189 if (t.getLockExpiry().isBefore(LocalDateTime.now())) { 190 t.setLockedBy(\u0026#34;\u0026#34;); 191 t.setLockExpiry(null); 192 ticketRepo.save(t); 193 log.info(\u0026#34;Ticket: {} lock released!\u0026#34;, t); 194 } 195 }); 196 log.info(\u0026#34;Lock cleanup job completed!\u0026#34;); 197 } 198 199} 1import {Injectable} from \u0026#39;@angular/core\u0026#39;; 2import {HttpClient} from \u0026#39;@angular/common/http\u0026#39;; 3import {Observable} from \u0026#39;rxjs\u0026#39;; 4import {Ticket} from \u0026#39;../models/ticket\u0026#39;; 5import {BookingRequest} from \u0026#39;../models/booking-request\u0026#39;; 6 7@Injectable({ 8 providedIn: \u0026#39;root\u0026#39; 9}) 10export class RestService { 11 12 constructor(private http: HttpClient) { 13 } 14 15 public getTickets(): Observable\u0026lt;Ticket[]\u0026gt; { 16 return this.http.get\u0026lt;Ticket[]\u0026gt;(\u0026#39;/api/tickets\u0026#39;); 17 } 18 19 public bookTicket(bookingRequest: BookingRequest): Observable\u0026lt;any\u0026gt; { 20 return this.http.post(\u0026#39;/api/ticket\u0026#39;, bookingRequest); 21 } 22 23 public holdBooking(bookingRequest: BookingRequest): Observable\u0026lt;any\u0026gt; { 24 return this.http.post(\u0026#39;/api/hold\u0026#39;, bookingRequest); 25 } 26 27 public cancelBooking(bookingRequest: BookingRequest): Observable\u0026lt;any\u0026gt; { 28 return this.http.post(\u0026#39;/api/cancel\u0026#39;, bookingRequest); 29 } 30 31 public getUser(): Observable\u0026lt;string\u0026gt; { 32 return this.http.get\u0026lt;string\u0026gt;(\u0026#39;/api/user\u0026#39;, {responseType: \u0026#39;text\u0026#39; as \u0026#39;json\u0026#39;}); 33 } 34 35} 
1\u0026lt;div class=\u0026#34;content-container\u0026#34;\u0026gt; 2 \u0026lt;div class=\u0026#34;content-area\u0026#34;\u0026gt; 3 \u0026lt;div class=\u0026#34;clr-row\u0026#34;\u0026gt; 4 \u0026lt;div class=\u0026#34;clr-col-12\u0026#34;\u0026gt; 5 \u0026lt;div class=\u0026#34;alert-section\u0026#34;\u0026gt; 6 \u0026lt;app-alert\u0026gt;\u0026lt;/app-alert\u0026gt; 7 \u0026lt;/div\u0026gt; 8 \u0026lt;h2 style=\u0026#34;text-align: center\u0026#34;\u0026gt;Tickets\u0026lt;/h2\u0026gt; 9 \u0026lt;clr-datagrid [(clrDgSelected)]=\u0026#34;selected\u0026#34;\u0026gt; 10 \u0026lt;clr-dg-column [clrDgField]=\u0026#34;\u0026#39;seatNumber\u0026#39;\u0026#34;\u0026gt;Seat Number\u0026lt;/clr-dg-column\u0026gt; 11 \u0026lt;clr-dg-column [clrDgField]=\u0026#34;\u0026#39;eventDate\u0026#39;\u0026#34;\u0026gt;Date\u0026lt;/clr-dg-column\u0026gt; 12 \u0026lt;clr-dg-column [clrDgField]=\u0026#34;\u0026#39;price\u0026#39;\u0026#34;\u0026gt;Price\u0026lt;/clr-dg-column\u0026gt; 13 \u0026lt;clr-dg-column [clrDgField]=\u0026#34;\u0026#39;booked\u0026#39;\u0026#34;\u0026gt;Status\u0026lt;/clr-dg-column\u0026gt; 14 \u0026lt;clr-dg-column [clrDgField]=\u0026#34;\u0026#39;bookedBy\u0026#39;\u0026#34;\u0026gt;Booked By\u0026lt;/clr-dg-column\u0026gt; 15 \u0026lt;clr-dg-column\u0026gt;QR Code\u0026lt;/clr-dg-column\u0026gt; 16 \u0026lt;clr-dg-column [clrDgField]=\u0026#34;\u0026#39;entered\u0026#39;\u0026#34;\u0026gt;Entered\u0026lt;/clr-dg-column\u0026gt; 17 \u0026lt;!-- structural directive --\u0026gt; 18 \u0026lt;clr-dg-row clr-dg-row *clrDgItems=\u0026#34;let ticket of tickets\u0026#34; [clrDgItem]=\u0026#34;ticket\u0026#34; 19 [clrDgSelectable]=\u0026#34;getSeatStatus(ticket) === \u0026#39;AVAILABLE\u0026#39;\u0026#34;\u0026gt; 20 \u0026lt;clr-dg-placeholder class=\u0026#34;content-center\u0026#34;\u0026gt;No Tickets!\u0026lt;/clr-dg-placeholder\u0026gt; 21 \u0026lt;clr-dg-cell\u0026gt;{{ticket.seatNumber}}\u0026lt;/clr-dg-cell\u0026gt; 22 
\u0026lt;clr-dg-cell\u0026gt;{{ticket.eventDate}}\u0026lt;/clr-dg-cell\u0026gt; 23 \u0026lt;clr-dg-cell\u0026gt;{{ticket.price}}\u0026lt;/clr-dg-cell\u0026gt; 24 \u0026lt;clr-dg-cell\u0026gt;{{getSeatStatus(ticket)}}\u0026lt;/clr-dg-cell\u0026gt; 25 \u0026lt;clr-dg-cell\u0026gt;{{ticket.bookedBy}}\u0026lt;/clr-dg-cell\u0026gt; 26 \u0026lt;clr-dg-cell \u0026gt; 27 \u0026lt;a *ngIf=\u0026#34;getSeatStatus(ticket) === \u0026#39;BOOKED\u0026#39;\u0026#34; href=\u0026#34;/api/qrcode/{{ticket.entryToken}}\u0026#34; 28 target=\u0026#34;_blank\u0026#34;\u0026gt;Ticket\u0026lt;/a\u0026gt; 29 \u0026lt;/clr-dg-cell\u0026gt; 30 \u0026lt;clr-dg-cell\u0026gt; 31 \u0026lt;cds-icon *ngIf=\u0026#34;ticket.entered\u0026#34; shape=\u0026#34;success-standard\u0026#34; status=\u0026#34;success\u0026#34; title=\u0026#34;Admitted\u0026#34; 32 class=\u0026#34;action-icon\u0026#34; solid\u0026gt;\u0026lt;/cds-icon\u0026gt; 33 \u0026lt;/clr-dg-cell\u0026gt; 34 \u0026lt;/clr-dg-row\u0026gt; 35 36 \u0026lt;clr-dg-footer\u0026gt; 37 \u0026lt;clr-dg-pagination #pagination [clrDgPageSize]=\u0026#34;10\u0026#34;\u0026gt; 38 \u0026lt;clr-dg-page-size [clrPageSizeOptions]=\u0026#34;[10,20,50,100]\u0026#34;\u0026gt;Tickets per page\u0026lt;/clr-dg-page-size\u0026gt; 39 {{pagination.firstItem + 1}} - {{pagination.lastItem + 1}} of {{pagination.totalItems}} tickets 40 \u0026lt;/clr-dg-pagination\u0026gt; 41 \u0026lt;/clr-dg-footer\u0026gt; 42 \u0026lt;/clr-datagrid\u0026gt; 43 \u0026lt;br/\u0026gt; 44 \u0026lt;button type=\u0026#34;submit\u0026#34; class=\u0026#34;btn btn-primary btn-block\u0026#34; (click)=\u0026#34;holdBooking()\u0026#34; 45 *ngIf=\u0026#34;selected.length \u0026gt; 0\u0026#34;\u0026gt;Book 46 Ticket 47 \u0026lt;/button\u0026gt; 48 \u0026lt;/div\u0026gt; 49 \u0026lt;/div\u0026gt; 50 \u0026lt;/div\u0026gt; 51\u0026lt;/div\u0026gt; 52 53\u0026lt;!--Pay Modal--\u0026gt; 54\u0026lt;clr-modal [(clrModalOpen)]=\u0026#34;payModal\u0026#34; 
[clrModalClosable]=\u0026#34;false\u0026#34;\u0026gt; 55 \u0026lt;h3 class=\u0026#34;modal-title\u0026#34;\u0026gt;Pay \u0026amp; Confirm\u0026lt;/h3\u0026gt; 56 \u0026lt;div class=\u0026#34;modal-body\u0026#34;\u0026gt; 57 \u0026lt;p\u0026gt;You have 30 Secs to complete the payment!\u0026lt;/p\u0026gt; 58 \u0026lt;p *ngFor=\u0026#34;let item of selected;index as i\u0026#34;\u0026gt;{{i + 1}}. {{item.seatNumber}}\u0026lt;/p\u0026gt; 59 \u0026lt;p\u0026gt;Total Amount: {{getTotal()}} Rs.\u0026lt;/p\u0026gt; 60 \u0026lt;/div\u0026gt; 61 \u0026lt;div class=\u0026#34;modal-footer\u0026#34;\u0026gt; 62 \u0026lt;button type=\u0026#34;button\u0026#34; class=\u0026#34;btn btn-outline\u0026#34; (click)=\u0026#34;cancelBooking()\u0026#34;\u0026gt;Cancel\u0026lt;/button\u0026gt; 63 \u0026lt;button type=\u0026#34;button\u0026#34; class=\u0026#34;btn btn-primary\u0026#34; (click)=\u0026#34;confirmBooking()\u0026#34;\u0026gt;Confirm\u0026lt;/button\u0026gt; 64 \u0026lt;/div\u0026gt; 65\u0026lt;/clr-modal\u0026gt; 1import {Component, OnInit, ViewChild} from \u0026#39;@angular/core\u0026#39;; 2import {Ticket} from \u0026#39;../../models/ticket\u0026#39;; 3import {RestService} from \u0026#39;../../services/rest.service\u0026#39;; 4import {Router} from \u0026#39;@angular/router\u0026#39;; 5import {ClarityIcons, trashIcon} from \u0026#39;@cds/core/icon\u0026#39;; 6import {AlertComponent} from \u0026#39;../alert/alert.component\u0026#39;; 7import {BookingRequest} from \u0026#39;../../models/booking-request\u0026#39;; 8 9@Component({ 10 selector: \u0026#39;app-home\u0026#39;, 11 templateUrl: \u0026#39;./home.component.html\u0026#39;, 12 styleUrls: [] 13}) 14export class HomeComponent implements OnInit { 15 16 tickets: Ticket[] = []; 17 ticket: Ticket = new Ticket(); 18 selected: Ticket[] = []; 19 // @ts-ignore 20 @ViewChild(AlertComponent, {static: true}) private alert: AlertComponent; 21 payModal = false; 22 23 constructor(private restService: RestService, private router: Router) { 24 
ClarityIcons.addIcons(trashIcon); 25 } 26 27 ngOnInit(): void { 28 this.getTickets(); 29 } 30 31 getTickets(): void { 32 this.ticket = new Ticket(); 33 this.restService.getTickets().subscribe(data =\u0026gt; { 34 this.tickets = data; 35 }); 36 } 37 38 holdBooking(): void { 39 const request = new BookingRequest(); 40 request.ticketIds = []; 41 request.user = sessionStorage.getItem(\u0026#39;user\u0026#39;); 42 this.selected.forEach(item =\u0026gt; { 43 request.ticketIds.push(Number(item.id)); 44 }); 45 this.restService.holdBooking(request) 46 .subscribe(data =\u0026gt; { 47 if (data) { 48 this.payModal = true; 49 } else { 50 this.alert.showError(\u0026#39;Ticket is already reserved, Try again!\u0026#39;); 51 this.getTickets(); 52 } 53 }, 54 error =\u0026gt; { 55 this.alert.showError(\u0026#39;Ticket is already reserved, Try again!\u0026#39;); 56 this.getTickets(); 57 }); 58 59 } 60 61 cancelBooking(): void { 62 const request = new BookingRequest(); 63 request.ticketIds = []; 64 request.user = sessionStorage.getItem(\u0026#39;user\u0026#39;); 65 this.selected.forEach(item =\u0026gt; { 66 request.ticketIds.push(Number(item.id)); 67 }); 68 this.restService.cancelBooking(request) 69 .subscribe(data =\u0026gt; { 70 this.payModal = false; 71 }); 72 } 73 74 confirmBooking(): void { 75 const request = new BookingRequest(); 76 request.ticketIds = []; 77 request.user = sessionStorage.getItem(\u0026#39;user\u0026#39;); 78 this.selected.forEach(item =\u0026gt; { 79 request.ticketIds.push(Number(item.id)); 80 }); 81 this.restService.bookTicket(request) 82 .subscribe(data =\u0026gt; { 83 if (data) { 84 this.alert.showSuccess(\u0026#39;Ticket booked successfully!\u0026#39;); 85 } else { 86 this.alert.showError(\u0026#39;Ticket booking failed!\u0026#39;); 87 } 88 this.payModal = false; 89 this.getTickets(); 90 }); 91 } 92 93 getTotal(): number { 94 let sum = 0; 95 this.selected.forEach(item =\u0026gt; { 96 // @ts-ignore 97 sum += item.price; 98 }); 99 return sum; 100 } 101 102 
getSeatStatus(ticket: Ticket): string { 103 // @ts-ignore 104 if (ticket.lockedBy !== \u0026#39;\u0026#39;) { 105 return \u0026#39;RESERVED\u0026#39;; 106 } 107 if (ticket.booked) { 108 return \u0026#39;BOOKED\u0026#39;; 109 } else { 110 return \u0026#39;AVAILABLE\u0026#39;; 111 } 112 } 113 114} Setup 1# Project 87 2 3Ticket Booking Application with QR code tickets 4 5[https://gitorko.github.io/ticket-booking-system/](https://gitorko.github.io/ticket-booking-system/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14 15node --version 16v16.16.0 17 18yarn --version 191.22.18 20``` 21 22### Postgres DB 23 24``` 25docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:14 26docker ps 27docker exec -it pg-container psql -U postgres -W postgres 28CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 29CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 30grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 31 32docker stop pg-container 33docker start pg-container 34``` 35 36### Dev 37 38To run the backend in dev mode. 39 40```bash 41./gradlew clean build 42./gradlew bootRun 43``` 44 45To Run UI in dev mode 46 47```bash 48cd ui 49yarn install 50yarn build 51yarn start 52``` 53 54Open [http://localhost:4200/](http://localhost:4200/) 55 56### Prod 57 58To run as a single jar, both UI and backend are bundled to single uber jar. 59 60```bash 61./gradlew cleanBuild 62cd build/libs 63java -jar project87-1.0.0.jar 64``` 65 66Open [http://localhost:8080/](http://localhost:8080/) 67 68### Docker 69 70```bash 71./gradlew cleanBuild 72docker build -f docker/Dockerfile --force-rm -t project87:1.0.0 . 
73docker images |grep project87 74docker tag project87:1.0.0 gitorko/project87:1.0.0 75docker push gitorko/project87:1.0.0 76docker-compose -f docker/docker-compose.yml up 77``` References https://clarity.design/\nhttps://spring.io/projects/spring-boot\n","link":"https://gitorko.github.io/post/ticket-booking-system/","section":"post","tags":["optimistic-locking","jpa"],"title":"Ticket Booking System"},{"body":"","link":"https://gitorko.github.io/tags/completable-future/","section":"tags","tags":null,"title":"Completable-Future"},{"body":"","link":"https://gitorko.github.io/categories/completablefuture/","section":"categories","tags":null,"title":"CompletableFuture"},{"body":"Async Programming using CompletableFuture\nGithub: https://github.com/gitorko/project83\nBasics Methods demonstrating how to use CompletableFuture\n1package com.demo.project83; 2 3import static org.awaitility.Awaitility.await; 4 5import java.util.ArrayList; 6import java.util.Date; 7import java.util.List; 8import java.util.Objects; 9import java.util.concurrent.CompletableFuture; 10import java.util.concurrent.ExecutorService; 11import java.util.concurrent.Executors; 12import java.util.concurrent.Future; 13import java.util.concurrent.TimeUnit; 14import java.util.concurrent.TimeoutException; 15import java.util.concurrent.atomic.AtomicInteger; 16import java.util.stream.Collectors; 17 18import lombok.SneakyThrows; 19import lombok.extern.slf4j.Slf4j; 20import org.junit.jupiter.api.Assertions; 21import org.junit.jupiter.api.Test; 22 23@Slf4j 24public class CompletableFutureTest { 25 26 static AtomicInteger counter = new AtomicInteger(); 27 28 /** 29 * get() is blocking call. So main thread has to wait. 30 * Old way with Future. Dont use it. 
31 */ 32 @Test 33 @SneakyThrows 34 void blockingChain_test() { 35 counter = new AtomicInteger(); 36 List\u0026lt;Future\u0026lt;String\u0026gt;\u0026gt; futureLst = new ArrayList\u0026lt;\u0026gt;(); 37 ExecutorService executor = Executors.newCachedThreadPool(); 38 for (int i = 0; i \u0026lt; 5; i++) { 39 int finalI = i; 40 Future\u0026lt;String\u0026gt; future = executor.submit(() -\u0026gt; greetHello(\u0026#34;Jack_\u0026#34; + finalI)); 41 futureLst.add(future); 42 } 43 for (Future\u0026lt;String\u0026gt; future : futureLst) { 44 //get is blocking the main thread here. 45 String message = future.get(); 46 finishedGreetHello(message); 47 } 48 executor.shutdown(); 49 Assertions.assertEquals(5, counter.get()); 50 } 51 52 /** 53 * Callback attached so non blocking. 54 * 55 * Ability to provide call back functionality. 56 * You can manually set the return response on a CompletableFuture which you cant do on Future. You can cancel it as well. 57 * You can chain \u0026amp; combine CompletableFutures which is not possible with Future. 58 * Exception handling support in CompletableFutures which is not available in Future. 59 * 60 * Although chaining can be done manually but not advised to use this approach. 61 * This example is for reference only. 62 */ 63 @Test 64 @SneakyThrows 65 void nonBlockingChain_test() { 66 counter = new AtomicInteger(); 67 ExecutorService executor = Executors.newCachedThreadPool(); 68 for (int i = 0; i \u0026lt; 5; i++) { 69 int finalI = i; 70 executor.submit(() -\u0026gt; { 71 CompletableFutureTest.greetHelloChain(\u0026#34;Jack_\u0026#34; + finalI, new CompletableFuture\u0026lt;\u0026gt;()); 72 }); 73 } 74 //Give enough time for all threads to complete and return back with results. 
75 TimeUnit.SECONDS.sleep(10); 76 executor.shutdown(); 77 Assertions.assertEquals(5, counter.get()); 78 } 79 80 /** 81 * When function does not return anything then use CompletableFuture.runAsync() 82 * returns CompletableFuture\u0026lt;Void\u0026gt; 83 */ 84 @Test 85 @SneakyThrows 86 void runAsync_test() { 87 counter = new AtomicInteger(); 88 for (int i = 0; i \u0026lt; 5; i++) { 89 int finalI = i; 90 CompletableFuture.runAsync(() -\u0026gt; { 91 greetHello(\u0026#34;Jack_\u0026#34; + finalI); 92 }).thenRun(() -\u0026gt; { 93 counter.incrementAndGet(); 94 log.info(\u0026#34;Completed!\u0026#34;); 95 }); 96 } 97 //Give enough time for all threads to complete and return back with results. 98 TimeUnit.SECONDS.sleep(5); 99 Assertions.assertEquals(5, counter.get()); 100 } 101 102 @Test 103 @SneakyThrows 104 void runAsync_test_await() { 105 counter = new AtomicInteger(); 106 for (int i = 0; i \u0026lt; 5; i++) { 107 int finalI = i; 108 CompletableFuture.runAsync(() -\u0026gt; { 109 greetHello(\u0026#34;Jack_\u0026#34; + finalI); 110 }).thenRun(() -\u0026gt; { 111 counter.incrementAndGet(); 112 log.info(\u0026#34;Completed!\u0026#34;); 113 }); 114 } 115 await().atMost(2, TimeUnit.SECONDS).until(() -\u0026gt; counter.get() == 5); 116 Assertions.assertEquals(5, counter.get()); 117 } 118 119 /** 120 * Returns CompletableFuture\u0026lt;T\u0026gt; 121 */ 122 @Test 123 @SneakyThrows 124 void supplyAsync_test() { 125 counter = new AtomicInteger(); 126 for (int i = 0; i \u0026lt; 5; i++) { 127 int finalI = i; 128 CompletableFuture.supplyAsync(() -\u0026gt; { 129 return greetHello(\u0026#34;Jack_\u0026#34; + finalI); 130 }).thenAccept(message -\u0026gt; { 131 counter.incrementAndGet(); 132 log.info(\u0026#34;Greeting: {}\u0026#34;, message); 133 }); 134 } 135 //Give enough time for all threads to complete and return back with results. 
136 TimeUnit.SECONDS.sleep(5); 137 Assertions.assertEquals(5, counter.get()); 138 } 139 140 /** 141 * thenApply will return a nested CompletionStage. 142 * thenApply returns CompletionStage \u0026amp; return value of the function. 143 */ 144 @Test 145 @SneakyThrows 146 void thenApply_test() { 147 CompletableFuture\u0026lt;String\u0026gt; completableFuture = CompletableFuture.supplyAsync(() -\u0026gt; { 148 //Do some computation \u0026amp; return the result 149 return \u0026#34;hello \u0026#34;; 150 }).thenApply(message -\u0026gt; { 151 return message + \u0026#34;world\u0026#34;; 152 }).thenApply(message -\u0026gt; { 153 return message.toUpperCase(); 154 }); 155 // Returns type CompletionStage\u0026lt;CompletionStage\u0026lt;CompletionStage\u0026lt;String\u0026gt;\u0026gt;\u0026gt;. 156 Assertions.assertEquals(\u0026#34;HELLO WORLD\u0026#34;, completableFuture.get()); 157 } 158 159 /** 160 * thenAccept will return a single CompletionStage, flattening effect like a flatMap 161 * thenAccept takes a Consumer and returns a Void \u0026amp; only the completion state. 
162 */ 163 @Test 164 @SneakyThrows 165 void thenAccept_test() { 166 counter = new AtomicInteger(); 167 CompletableFuture\u0026lt;Void\u0026gt; completableFuture = CompletableFuture.supplyAsync(() -\u0026gt; { 168 //Do some computation \u0026amp; return the result 169 return \u0026#34;hello world\u0026#34;; 170 }).thenAccept(message -\u0026gt; { 171 log.info(\u0026#34;Got Message: {}\u0026#34;, message); 172 }).thenRun(() -\u0026gt; { 173 counter.incrementAndGet(); 174 log.info(\u0026#34;Cant access previous result, just running!\u0026#34;); 175 }); 176 completableFuture.get(); 177 Assertions.assertEquals(1, counter.get()); 178 } 179 180 /** 181 * thenCompose() combines two futures where one future is dependent on the other 182 * thenCompose will return a single CompletionStage, flattening effect like a flatMap 183 */ 184 @Test 185 @SneakyThrows 186 void thenCompose_test() { 187 //Notice the flattened return type. Combines 2 dependent future. 188 CompletableFuture\u0026lt;String\u0026gt; completableFuture = getGreeting(\u0026#34;Jack\u0026#34;) 189 .thenCompose(message -\u0026gt; CompletableFutureTest.transformMessage(message)); 190 Assertions.assertEquals(\u0026#34;HELLO JACK\u0026#34;, completableFuture.get()); 191 } 192 193 /** 194 * thenCombine() combines two independent futures. 195 */ 196 @Test 197 @SneakyThrows 198 void thenCombine_test() { 199 //Combines the 2 independent futures. 
200 CompletableFuture\u0026lt;String\u0026gt; completableFuture = getGreeting(\u0026#34;Jack\u0026#34;) 201 .thenCombine(CompletableFutureTest.getCurrentDate(), (message, currentDate) -\u0026gt; { 202 return CompletableFutureTest.addDateToMessage(message, currentDate); 203 }); 204 Assertions.assertTrue(completableFuture.get().contains(\u0026#34;Hello Jack was sent on\u0026#34;)); 205 } 206 207 @Test 208 @SneakyThrows 209 void exceptionally_test() { 210 CompletableFuture\u0026lt;String\u0026gt; completableFuture = CompletableFuture.supplyAsync(() -\u0026gt; { 211 //Do some computation \u0026amp; return the result 212 return \u0026#34;Stage 0\u0026#34;; 213 }).thenApply(result -\u0026gt; { 214 return result + \u0026#34; -\u0026gt; Stage 1\u0026#34;; 215 }).exceptionally(ex -\u0026gt; { 216 return \u0026#34;Error in stage 1 : \u0026#34; + ex.getMessage(); 217 }).thenApply(result -\u0026gt; { 218 if (true) { 219 throw new RuntimeException(\u0026#34;My custom error!\u0026#34;); 220 } 221 return result + \u0026#34; -\u0026gt; Stage 2\u0026#34;; 222 }).exceptionally(ex -\u0026gt; { 223 return \u0026#34;Error in stage 2 : \u0026#34; + ex.getMessage(); 224 }); 225 Assertions.assertTrue(completableFuture.get().contains(\u0026#34;Error in stage 2\u0026#34;)); 226 } 227 228 @Test 229 @SneakyThrows 230 void allOf_test() { 231 counter = new AtomicInteger(); 232 List\u0026lt;CompletableFuture\u0026lt;Void\u0026gt;\u0026gt; tasks = getListOfTasks(); 233 CompletableFuture\u0026lt;Void\u0026gt; allTasks = CompletableFuture.allOf(tasks.get(0), tasks.get(1), tasks.get(2)); 234 allTasks.get(); 235 log.info(\u0026#34;Waited for all tasks to complete and then returned!\u0026#34;); 236 Assertions.assertEquals(3, counter.get()); 237 } 238 239 @Test 240 @SneakyThrows 241 void anyOf_test() { 242 counter = new AtomicInteger(); 243 List\u0026lt;CompletableFuture\u0026lt;Void\u0026gt;\u0026gt; tasks = getListOfTasks(); 244 CompletableFuture\u0026lt;Object\u0026gt; allTasks = 
CompletableFuture.anyOf(tasks.get(0), tasks.get(1), tasks.get(2)); 245 allTasks.get(); 246 log.info(\u0026#34;Waited for any one task to complete and then returned!\u0026#34;); 247 Assertions.assertTrue(counter.get() \u0026gt;= 1); 248 } 249 250 @Test 251 @SneakyThrows 252 void allOf_withTimeLimit_test() { 253 counter = new AtomicInteger(); 254 List\u0026lt;CompletableFuture\u0026lt;Void\u0026gt;\u0026gt; tasks = getListOfTasks(); 255 CompletableFuture\u0026lt;Void\u0026gt; allTasks = CompletableFuture.allOf(tasks.get(0), tasks.get(1), tasks.get(2)); 256 try { 257 allTasks.get(4, TimeUnit.SECONDS); 258 } catch (TimeoutException ex) { 259 log.error(\u0026#34;timeout!\u0026#34;, ex); 260 } 261 log.info(\u0026#34;Waited for 4 seconds and returned!\u0026#34;); 262 Assertions.assertTrue(counter.get() \u0026gt;= 2); 263 } 264 265 @Test 266 @SneakyThrows 267 void allOf_iterate() { 268 List\u0026lt;String\u0026gt; names = List.of(\u0026#34;Jack\u0026#34;, \u0026#34;Adam\u0026#34;, \u0026#34;Ram\u0026#34;, \u0026#34;Ajay\u0026#34;); 269 List\u0026lt;CompletableFuture\u0026lt;String\u0026gt;\u0026gt; customersFuture = names.stream() 270 .map(userName -\u0026gt; checkName(userName)) 271 .collect(Collectors.toList()); 272 273 CompletableFuture\u0026lt;Void\u0026gt; allFutures = CompletableFuture.allOf(customersFuture.toArray(new CompletableFuture[customersFuture.size()])); 274 275 CompletableFuture\u0026lt;List\u0026lt;String\u0026gt;\u0026gt; allCustomersFuture = allFutures.thenApply(v -\u0026gt; customersFuture.stream() 276 .map(pageContentFuture -\u0026gt; pageContentFuture.join()) 277 .filter(Objects::isNull) 278 .collect(Collectors.toList())); 279 280 List\u0026lt;String\u0026gt; customers = allCustomersFuture.get(); 281 Assertions.assertEquals(2, customers.size()); 282 } 283 284 private static CompletableFuture\u0026lt;String\u0026gt; checkName(String userName) { 285 return CompletableFuture.supplyAsync(() -\u0026gt; { 286 if (userName.startsWith(\u0026#34;A\u0026#34;)) 
return userName; 287 return null; 288 }); 289 } 290 291 private static String greetHello(String name) { 292 log.info(\u0026#34;Got name: {}\u0026#34;, name); 293 return \u0026#34;Hello \u0026#34; + name; 294 } 295 296 private static void finishedGreetHello(String result) { 297 counter.incrementAndGet(); 298 log.info(\u0026#34;Finished greet chain: {}\u0026#34;, result); 299 } 300 301 private static void greetHelloChain(String name, CompletableFuture\u0026lt;String\u0026gt; completableFuture) { 302 log.info(\u0026#34;Got name: {}\u0026#34;, name); 303 completableFuture.complete(\u0026#34;Hello \u0026#34; + name); 304 completableFuture.whenComplete(CompletableFutureTest::finishedGreetHelloChain); 305 } 306 307 private static void finishedGreetHelloChain(String result, Throwable t) { 308 counter.incrementAndGet(); 309 log.info(\u0026#34;Finished greet chain: {}\u0026#34;, result); 310 } 311 312 private static CompletableFuture\u0026lt;String\u0026gt; getGreeting(String userName) { 313 return CompletableFuture.supplyAsync(() -\u0026gt; { 314 return \u0026#34;Hello \u0026#34; + userName; 315 }); 316 } 317 318 private static CompletableFuture\u0026lt;Date\u0026gt; getCurrentDate() { 319 return CompletableFuture.supplyAsync(() -\u0026gt; { 320 return new Date(); 321 }); 322 } 323 324 private static CompletableFuture\u0026lt;String\u0026gt; transformMessage(String message) { 325 return CompletableFuture.supplyAsync(() -\u0026gt; { 326 return message.toUpperCase(); 327 }); 328 } 329 330 private static String addDateToMessage(String message, Date currentDate) { 331 return message + \u0026#34; was sent on \u0026#34; + currentDate; 332 } 333 334 //Each task is delayed by few seconds 335 private static List\u0026lt;CompletableFuture\u0026lt;Void\u0026gt;\u0026gt; getListOfTasks() { 336 List\u0026lt;CompletableFuture\u0026lt;Void\u0026gt;\u0026gt; tasks = new ArrayList\u0026lt;\u0026gt;(); 337 tasks.add(CompletableFuture.supplyAsync(() -\u0026gt; { 338 return 
greetHello(\u0026#34;Jack\u0026#34;); 339 }).thenAccept(message -\u0026gt; { 340 counter.incrementAndGet(); 341 try { 342 TimeUnit.SECONDS.sleep(1); 343 } catch (InterruptedException e) { 344 } 345 log.info(\u0026#34;Greeting: {}\u0026#34;, message); 346 })); 347 tasks.add(CompletableFuture.supplyAsync(() -\u0026gt; { 348 return greetHello(\u0026#34;Raj\u0026#34;); 349 }).thenAccept(message -\u0026gt; { 350 counter.incrementAndGet(); 351 try { 352 TimeUnit.SECONDS.sleep(3); 353 } catch (InterruptedException e) { 354 } 355 log.info(\u0026#34;Greeting: {}\u0026#34;, message); 356 })); 357 tasks.add(CompletableFuture.supplyAsync(() -\u0026gt; { 358 return greetHello(\u0026#34;Dan\u0026#34;); 359 }).thenAccept(message -\u0026gt; { 360 counter.incrementAndGet(); 361 try { 362 TimeUnit.SECONDS.sleep(5); 363 } catch (InterruptedException e) { 364 } 365 log.info(\u0026#34;Greeting: {}\u0026#34;, message); 366 })); 367 return tasks; 368 } 369 370} References https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/util/concurrent/CompletableFuture.html\n","link":"https://gitorko.github.io/post/completable-future-basics/","section":"post","tags":["completable-future","future"],"title":"CompletableFuture - Basics"},{"body":"","link":"https://gitorko.github.io/tags/future/","section":"tags","tags":null,"title":"Future"},{"body":"ModelMapper is an intelligent, refactoring safe object mapping library that automatically maps objects to each other. 
It uses a convention based approach.\nGithub: https://github.com/gitorko/project85\nModel Mapper Methods demonstrating how to use model mapper\n1package com.demo.project85; 2 3import java.util.ArrayList; 4import java.util.List; 5 6import lombok.AllArgsConstructor; 7import lombok.Builder; 8import lombok.Data; 9import lombok.NoArgsConstructor; 10import lombok.extern.slf4j.Slf4j; 11import org.junit.jupiter.api.Assertions; 12import org.junit.jupiter.api.Test; 13import org.modelmapper.Converter; 14import org.modelmapper.ModelMapper; 15import org.modelmapper.PropertyMap; 16import org.modelmapper.TypeMap; 17 18@Slf4j 19public class ModelMapperTest { 20 21 @Test 22 public void test_inlineCall() { 23 Person person = getPerson(); 24 ModelMapper modelMapper = new ModelMapper(); 25 TypeMap\u0026lt;Person, PersonView\u0026gt; typeMap = modelMapper.createTypeMap(Person.class, PersonView.class); 26 typeMap.addMappings(mapper -\u0026gt; { 27 mapper.map(Person::getFirstName, PersonView::setFirstName); 28 mapper.map(Person::getFamilyName, PersonView::setLastName); 29 }); 30 PersonView personView = modelMapper.map(person, PersonView.class); 31 log.info(\u0026#34;personView: {}\u0026#34;, personView); 32 Assertions.assertEquals(personView.getFirstName(), person.getFirstName()); 33 Assertions.assertEquals(personView.getLastName(), person.getFamilyName()); 34 } 35 36 @Test 37 public void test_directCall() { 38 Person person = getPerson(); 39 ModelMapper modelMapper = new ModelMapper(); 40 modelMapper.addMappings(personMap); 41 PersonView personView = modelMapper.map(person, PersonView.class); 42 log.info(\u0026#34;personView: {}\u0026#34;, personView); 43 Assertions.assertEquals(personView.getFirstName(), person.getFirstName()); 44 Assertions.assertEquals(personView.getLastName(), person.getFamilyName().toUpperCase()); 45 } 46 47 @Test 48 public void test_directCallJava8() { 49 Person person = getPerson(); 50 ModelMapper modelMapper = new ModelMapper(); 51 PersonView personView = 
modelMapper 52 .typeMap(Person.class, PersonView.class) 53 .addMappings(mapper -\u0026gt; { 54 mapper.using(toUppercase) 55 .map(src -\u0026gt; src.getFamilyName(), PersonView::setLastName); 56 mapper.using(workStatusConvertor) 57 .map(src -\u0026gt; src.getWorkStatus(), PersonView::setWorkStatusName); 58 mapper.map(Person::getAge, PersonView::setAgeStr); 59 }) 60 .map(person); 61 log.info(\u0026#34;personView: {}\u0026#34;, personView); 62 Assertions.assertEquals(personView.getFirstName(), person.getFirstName()); 63 Assertions.assertEquals(personView.getLastName(), person.getFamilyName().toUpperCase()); 64 Assertions.assertEquals(personView.getAgeStr(), String.valueOf(person.getAge())); 65 } 66 67 @Test 68 public void test_genericHelperClass() { 69 Person person = getPerson(); 70 MapperHelper\u0026lt;Person, PersonView\u0026gt; entityMapperHelper = new MapperHelper\u0026lt;\u0026gt;(Person.class, PersonView.class); 71 entityMapperHelper.mapper.typeMap(Person.class, PersonView.class) 72 .addMappings(mapper -\u0026gt; { 73 mapper.using(toUppercase) 74 .map(src -\u0026gt; src.getFamilyName(), PersonView::setLastName); 75 mapper.using(workStatusConvertor) 76 .map(src -\u0026gt; src.getWorkStatus(), PersonView::setWorkStatusName); 77 mapper.map(Person::getAge, PersonView::setAgeStr); 78 }); 79 PersonView personView = entityMapperHelper.toModel(person); 80 log.info(\u0026#34;personView: {}\u0026#34;, personView); 81 Assertions.assertEquals(personView.getFirstName(), person.getFirstName()); 82 } 83 84 @Test 85 public void test_listConvertGenericHelperClass() { 86 List\u0026lt;Person\u0026gt; personList = getListOfPersons(); 87 MapperHelper\u0026lt;Person,PersonView\u0026gt; entityMapperHelper = new MapperHelper\u0026lt;\u0026gt;(Person.class, PersonView.class); 88 List\u0026lt;PersonView\u0026gt; personViewList = entityMapperHelper.toListModel(personList); 89 log.info(\u0026#34;personView: {}\u0026#34;, personViewList); 90 Assertions.assertEquals(personViewList.size(), 
personList.size()); 91 } 92 93 PropertyMap\u0026lt;Person, PersonView\u0026gt; personMap = new PropertyMap\u0026lt;\u0026gt;() { 94 protected void configure() { 95 using(toUppercase).map().setLastName(source.getFamilyName()); 96 } 97 }; 98 99 Converter\u0026lt;String, String\u0026gt; toUppercase = 100 context -\u0026gt; context.getSource() == null ? null : context.getSource().toUpperCase(); 101 102 Converter\u0026lt;WorkStatus, String\u0026gt; workStatusConvertor = 103 context -\u0026gt; context.getSource() == null ? null : context.getSource().getStatus(); 104 105 public static List\u0026lt;Person\u0026gt; getListOfPersons() { 106 List\u0026lt;Person\u0026gt; personList = new ArrayList\u0026lt;\u0026gt;(); 107 personList.add(Person.builder() 108 .firstName(\u0026#34;luke\u0026#34;) 109 .familyName(\u0026#34;skywalker\u0026#34;) 110 .age(30) 111 .workStatus(WorkStatus.EMPLOYEED) 112 .build()); 113 personList.add(Person.builder() 114 .firstName(\u0026#34;han\u0026#34;) 115 .familyName(\u0026#34;solo\u0026#34;) 116 .age(35) 117 .workStatus(WorkStatus.EMPLOYEED) 118 .build()); 119 return personList; 120 } 121 122 public static Person getPerson() { 123 return Person.builder() 124 .firstName(\u0026#34;luke\u0026#34;) 125 .familyName(\u0026#34;skywalker\u0026#34;) 126 .workStatus(WorkStatus.EMPLOYEED) 127 .age(30) 128 .build(); 129 } 130} 131 132@Data 133@Builder 134@NoArgsConstructor 135@AllArgsConstructor 136class Person { 137 String firstName; 138 String familyName; 139 WorkStatus workStatus; 140 int age; 141} 142 143enum WorkStatus { 144 EMPLOYEED(\u0026#34;Employeed\u0026#34;), 145 UN_EMPLOYEED(\u0026#34;Unemployeed\u0026#34;); 146 147 String status; 148 WorkStatus(String status) { 149 this.status = status; 150 } 151 152 public String getStatus() { 153 return status; 154 } 155} 156 157@Data 158@Builder 159@NoArgsConstructor 160@AllArgsConstructor 161class PersonView { 162 String firstName; 163 String lastName; 164 WorkStatus workStatus; 165 String workStatusName; 166 
String ageStr; 167} 1package com.demo.project85; 2 3import java.util.List; 4import java.util.stream.Collectors; 5 6import org.modelmapper.ModelMapper; 7import org.springframework.data.domain.Page; 8 9public class MapperHelper\u0026lt;E, M\u0026gt; { 10 public MapperHelper(Class\u0026lt;E\u0026gt; entityType, Class\u0026lt;M\u0026gt; modelType) { 11 this.entityType = entityType; 12 this.modelType = modelType; 13 this.mapper = new ModelMapper(); 14 } 15 16 public E toEntity(M model) { 17 return mapper.map(model, entityType); 18 } 19 20 public M toModel(E entity) { 21 return mapper.map(entity, modelType); 22 } 23 24 public Page\u0026lt;M\u0026gt; toPagedModel(Page\u0026lt;E\u0026gt; entities) { 25 return entities.map(this::toModel); 26 } 27 28 public List\u0026lt;M\u0026gt; toListModel(List\u0026lt;E\u0026gt; source) { 29 return source 30 .stream() 31 .map(this::toModel) 32 .collect(Collectors.toList()); 33 } 34 35 Class\u0026lt;E\u0026gt; entityType; 36 Class\u0026lt;M\u0026gt; modelType; 37 ModelMapper mapper; 38} References http://modelmapper.org/\n","link":"https://gitorko.github.io/post/model-mapper/","section":"post","tags":["model-mapper"],"title":"Model Mapper"},{"body":"","link":"https://gitorko.github.io/tags/model-mapper/","section":"tags","tags":null,"title":"Model-Mapper"},{"body":"","link":"https://gitorko.github.io/categories/modelmapper/","section":"categories","tags":null,"title":"ModelMapper"},{"body":"Reactive programming examples on how to use spring reactor.\nGithub: https://github.com/gitorko/project83\nSpring Reactor Spring Reactor is a library for building non-blocking, reactive applications in Java. Reactor is used in Spring WebFlux, which is the reactive web framework included in Spring 5.\nFeatures\nReactive Streams: Reactor is based on the Reactive Streams specification, which defines a standard for asynchronous stream processing with non-blocking backpressure. 
Mono and Flux: Mono represents a single value or an empty result (similar to Optional). Flux represents a stream of 0 to N elements. Functional API: Reactor provides a rich set of operators that allow you to manipulate, transform, and compose reactive streams in a functional style. Non-blocking: Reactor is designed to work in a non-blocking manner, making it suitable for applications that need to handle a large number of concurrent I/O operations. Backpressure: Reactor supports backpressure, a mechanism to ensure that a producer does not overwhelm a consumer with too much data. Code 1package com.demo.project83; 2 3import static com.demo.project83.common.HelperUtil.getCustomer; 4import static com.demo.project83.common.HelperUtil.getCustomers; 5import static com.demo.project83.common.HelperUtil.getName; 6import static org.assertj.core.api.Assertions.assertThat; 7import static org.junit.jupiter.api.Assertions.assertEquals; 8 9import java.nio.file.Files; 10import java.nio.file.Path; 11import java.nio.file.Paths; 12import java.time.Duration; 13import java.time.LocalTime; 14import java.util.Arrays; 15import java.util.Collection; 16import java.util.HashMap; 17import java.util.List; 18import java.util.Map; 19import java.util.Optional; 20import java.util.UUID; 21import java.util.concurrent.Callable; 22import java.util.concurrent.CountDownLatch; 23import java.util.concurrent.FutureTask; 24import java.util.concurrent.TimeUnit; 25import java.util.concurrent.atomic.AtomicLong; 26import java.util.function.Consumer; 27import java.util.function.Function; 28import java.util.function.Supplier; 29import java.util.stream.Collectors; 30import java.util.stream.IntStream; 31import java.util.stream.Stream; 32 33import com.demo.project83.common.CompanyVO; 34import com.demo.project83.common.Customer; 35import com.demo.project83.common.Employee; 36import com.demo.project83.common.HelperUtil; 37import com.demo.project83.common.MyFeed; 38import com.demo.project83.common.MyListener; 39import 
lombok.SneakyThrows; 40import lombok.extern.slf4j.Slf4j; 41import org.junit.jupiter.api.Assertions; 42import org.junit.jupiter.api.BeforeAll; 43import org.junit.jupiter.api.Test; 44import org.reactivestreams.Subscription; 45import org.springframework.data.domain.PageImpl; 46import org.springframework.data.domain.PageRequest; 47import reactor.blockhound.BlockHound; 48import reactor.blockhound.BlockingOperationError; 49import reactor.core.Disposable; 50import reactor.core.Exceptions; 51import reactor.core.publisher.BaseSubscriber; 52import reactor.core.publisher.ConnectableFlux; 53import reactor.core.publisher.Flux; 54import reactor.core.publisher.FluxSink; 55import reactor.core.publisher.GroupedFlux; 56import reactor.core.publisher.Mono; 57import reactor.core.publisher.ParallelFlux; 58import reactor.core.scheduler.Schedulers; 59import reactor.test.StepVerifier; 60import reactor.test.scheduler.VirtualTimeScheduler; 61import reactor.tools.agent.ReactorDebugAgent; 62import reactor.util.function.Tuple2; 63import reactor.util.function.Tuples; 64import reactor.util.retry.Retry; 65import reactor.util.retry.RetryBackoffSpec; 66 67/** 68 * Reactive Streams Specification 69 * 1. Asynchronous 70 * 2. Non-Blocking 71 * 3. Backpressure 72 * 73 * Publisher (Mono/Flux) 74 * - subscribe (data source, db, remote service) 75 * Subscriber 76 * - onSubscribe 77 * - onNext 78 * - onError 79 * - onComplete 80 * Subscription 81 * - request 82 * - cancel 83 * Processor - Publisher + Subscriber 84 * 85 * Spring reactor is a Push + Pull data flow model 86 * 87 * Subscribers request for data. 
Publishers provide data 88 * subscribers (downstream) and publishers (upstream) 89 */ 90@Slf4j 91public class ReactorTest { 92 93 @BeforeAll 94 public static void init() { 95 BlockHound.install(); 96 } 97 98 /** 99 * ******************************************************************** 100 * Mono 101 * ******************************************************************** 102 */ 103 104 @Test 105 void test_stepVerifier() { 106 Mono.just(\u0026#34;jack\u0026#34;) 107 .as(StepVerifier::create) 108 .expectNext(\u0026#34;jack\u0026#34;) 109 .verifyComplete(); 110 } 111 112 @Test 113 void test_mono() { 114 //justOrEmpty 115 Mono\u0026lt;String\u0026gt; mono = Mono.just(\u0026#34;jack\u0026#34;); 116 mono.subscribe(System.out::println); 117 StepVerifier.create(mono) 118 .expectNext(\u0026#34;jack\u0026#34;) 119 .verifyComplete(); 120 } 121 122 @Test 123 void test_justOrEmpty() { 124 //justOrEmpty 125 Mono\u0026lt;String\u0026gt; mono = Mono.justOrEmpty(\u0026#34;jack\u0026#34;); 126 mono.subscribe(System.out::println); 127 StepVerifier.create(mono) 128 .expectNext(\u0026#34;jack\u0026#34;) 129 .verifyComplete(); 130 } 131 132 @Test 133 void test_justOrEmpty_null() { 134 //Note: Reactive Streams do not accept null values 135 Mono\u0026lt;String\u0026gt; mono = Mono.justOrEmpty(null); 136 mono.subscribe(System.out::println); 137 StepVerifier.create(mono) 138 .expectNextCount(0) 139 .verifyComplete(); 140 } 141 142 /** 143 * ******************************************************************** 144 * log 145 * request(unbounded) 146 * default subscribe requests unbounded, all elements are requested 147 * ******************************************************************** 148 */ 149 @Test 150 void test_log() { 151 //Note: Use log to look at transitions. 
152 Mono\u0026lt;String\u0026gt; mono = Mono.just(\u0026#34;jack\u0026#34;) 153 .log(); 154 mono.subscribe(s -\u0026gt; { 155 log.info(\u0026#34;Got: {}\u0026#34;, s); 156 }); 157 StepVerifier.create(mono) 158 .expectNext(\u0026#34;jack\u0026#34;) 159 .verifyComplete(); 160 } 161 162 /** 163 * ******************************************************************** 164 * flux 165 * ******************************************************************** 166 */ 167 @Test 168 void test_flux() { 169 Flux flux = Flux.just(\u0026#34;jack\u0026#34;, \u0026#34;raj\u0026#34;); 170 flux.subscribe(System.out::println); 171 StepVerifier.create(flux) 172 .expectNext(\u0026#34;jack\u0026#34;, \u0026#34;raj\u0026#34;) 173 .verifyComplete(); 174 } 175 176 /** 177 * ******************************************************************** 178 * Avoid blocking operations that hold thread 179 * ******************************************************************** 180 */ 181 @Test 182 void test_delayElements() { 183 Flux flux = Flux.just(\u0026#34;jack\u0026#34;, \u0026#34;raj\u0026#34;) 184 .map(e -\u0026gt; { 185 log.info(\u0026#34;Received: {}\u0026#34;, e); 186 //Bad idea to do Thread.sleep or any blocking call. 187 //Use delayElements. 188 return e; 189 }).delayElements(Duration.ofSeconds(1)); 190 flux.subscribe(System.out::println); 191 //Test will wait for 1 second 192 StepVerifier.create(flux) 193 .expectNext(\u0026#34;jack\u0026#34;, \u0026#34;raj\u0026#34;) 194 .verifyComplete(); 195 } 196 197 @Test 198 void test_delayElements_virtualTime() { 199 VirtualTimeScheduler.getOrSet(); 200 Flux flux = Flux.just(\u0026#34;jack\u0026#34;, \u0026#34;raj\u0026#34;) 201 .map(e -\u0026gt; { 202 log.info(\u0026#34;Received: {}\u0026#34;, e); 203 //Bad idea to do Thread.sleep or any blocking call. 204 //Use delayElements. 
205 return e; 206 }).delayElements(Duration.ofDays(1)); 207 flux.subscribe(System.out::println); 208 //Use virtual time as test cant wait for 1 day 209 StepVerifier.withVirtualTime(() -\u0026gt; flux) 210 .thenAwait(Duration.ofDays(2)) 211 .expectNext(\u0026#34;jack\u0026#34;, \u0026#34;raj\u0026#34;) 212 .verifyComplete(); 213 } 214 215 /** 216 * ******************************************************************** 217 * block 218 * Never use .block() as it blocks the thread. 219 * Can we used in tests but not in main code. 220 * ******************************************************************** 221 */ 222 @Test 223 void test_block() { 224 String name = Mono.just(\u0026#34;jack\u0026#34;) 225 .block(); 226 System.out.println(name); 227 } 228 229 /** 230 * ******************************************************************** 231 * flux from array, list, stream 232 * ******************************************************************** 233 */ 234 @Test 235 void test_fromArray() { 236 Integer[] arr = {1, 2, 3, 4, 5}; 237 Flux\u0026lt;Integer\u0026gt; flux = Flux.fromArray(arr); 238 flux.subscribe(System.out::println); 239 StepVerifier.create(flux) 240 .expectNext(1, 2, 3, 4, 5) 241 .verifyComplete(); 242 } 243 244 @Test 245 void test_fromIterable() { 246 Flux\u0026lt;String\u0026gt; flux = Flux.fromIterable(List.of(\u0026#34;jack\u0026#34;, \u0026#34;raj\u0026#34;)); 247 StepVerifier.create(flux) 248 .expectNext(\u0026#34;jack\u0026#34;, \u0026#34;raj\u0026#34;) 249 .verifyComplete(); 250 } 251 252 @Test 253 void test_fromStream() { 254 Flux\u0026lt;Integer\u0026gt; flux = Flux.fromStream(() -\u0026gt; List.of(1, 2, 3, 4, 5).stream()); 255 StepVerifier.create(flux) 256 .expectNext(1, 2, 3, 4, 5) 257 .verifyComplete(); 258 } 259 260 /** 261 * ******************************************************************** 262 * flux range 263 * ******************************************************************** 264 */ 265 @Test 266 public void test_range() { 267 
Flux\u0026lt;Integer\u0026gt; flux = Flux.range(1, 5); 268 flux.subscribe(n -\u0026gt; { 269 log.info(\u0026#34;number: {}\u0026#34;, n); 270 }); 271 StepVerifier.create(flux) 272 .expectNext(1, 2, 3, 4, 5) 273 .verifyComplete(); 274 } 275 276 /** 277 * ******************************************************************** 278 * map - synchronous by nature 279 * ******************************************************************** 280 */ 281 @Test 282 public void test_map() { 283 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;jack\u0026#34;, \u0026#34;raj\u0026#34;) 284 .map(String::toUpperCase); 285 StepVerifier 286 .create(flux1) 287 .expectNext(\u0026#34;JACK\u0026#34;, \u0026#34;RAJ\u0026#34;) 288 .verifyComplete(); 289 290 Flux\u0026lt;Integer\u0026gt; flux2 = Flux.range(3, 2) 291 .map(i -\u0026gt; i + 100); 292 flux2.subscribe(System.out::println); 293 StepVerifier.create(flux2) 294 .expectNext(103, 104) 295 .verifyComplete(); 296 } 297 298 /** 299 * ******************************************************************** 300 * flatMap - transform object 1-1 or 1-N in asynchronous fashion, returns back Mono/Flux. Use when there is delay/IO involved. 301 * map - transform an object 1-1 in fixed time in synchronous fashion. Use when there is no delay/IO involved. 302 * 303 * flatMap - processing is concurrent 304 * so all threads can run at same time not guarantee of being sequential. 305 * ******************************************************************** 306 */ 307 @Test 308 void test_flatMap() { 309 Flux flux1 = Flux.just(\u0026#34;jack\u0026#34;, \u0026#34;raj\u0026#34;) 310 .flatMap(HelperUtil::capitalizeReactive); 311 flux1.subscribe(System.out::println); 312 //No guarantee of order, jack can come first or raj can come first. 313 StepVerifier.create(flux1) 314 .expectSubscription() 315 .expectNextCount(2) 316 .verifyComplete(); 317 318 //capitalize will happen in blocking fashion. 
If this function takes long or does I/O then it will be blocking 319 //Use map only when there is no IO involved in the function 320 Flux flux2 = Flux.just(\u0026#34;jack\u0026#34;, \u0026#34;raj\u0026#34;) 321 .map(HelperUtil::capitalize); 322 flux2.subscribe(System.out::println); 323 StepVerifier.create(flux2) 324 .expectNext(\u0026#34;JACK\u0026#34;) 325 .expectNext(\u0026#34;RAJ\u0026#34;) 326 .verifyComplete(); 327 328 Flux flux3 = Flux.fromIterable(getCustomers()) 329 .flatMap(HelperUtil::capitalizeCustomerName); 330 flux3.subscribe(System.out::println); 331 //No guarantee of order 332 StepVerifier.create(flux3) 333 .expectNextCount(5) 334 .verifyComplete(); 335 } 336 337 /** 338 * Here flatMap will run one after other as there is just 1 thread allocated for it. 339 * You can also look at concatMap to do the same 340 */ 341 @Test 342 void test_flatMap_nonConcurrent() { 343 Flux\u0026lt;Integer\u0026gt; flux = Flux.range(1, 10) 344 .map(i -\u0026gt; i) 345 .flatMap(i -\u0026gt; { 346 return Mono.just(i); 347 }, 1); 348 flux.subscribe(System.out::println); 349 StepVerifier.create(flux) 350 .expectSubscription() 351 .expectNextCount(10) 352 .verifyComplete(); 353 } 354 355 /** 356 * ******************************************************************** 357 * flatMap - object modification 358 * ******************************************************************** 359 */ 360 @Test 361 void test_objectModification() { 362 //Modification of object in chain - done via flatMap 363 //Ideally create a new object instead of modifying the existing object. 
364 Mono\u0026lt;Customer\u0026gt; mono = Mono.just(getCustomer()) 365 .flatMap(e -\u0026gt; { 366 e.setCity(\u0026#34;paris\u0026#34;); 367 return Mono.just(e); 368 }); 369 StepVerifier.create(mono) 370 .assertNext(e -\u0026gt; { 371 assertThat(e.getCity()).isEqualTo(\u0026#34;paris\u0026#34;); 372 }) 373 .verifyComplete(); 374 } 375 376 @Test 377 void test_objectModification_zipWith() { 378 //Modification of object in chain - done via zipWith 379 //The 2nd argument for zipWith is a combinator function that determines how the 2 mono are zipped 380 Mono\u0026lt;Customer\u0026gt; mono = Mono.just(getCustomer()) 381 .zipWith(Mono.just(\u0026#34;paris\u0026#34;), HelperUtil::changeCity); 382 StepVerifier.create(mono) 383 .assertNext(e -\u0026gt; { 384 assertThat(e.getCity()).isEqualTo(\u0026#34;paris\u0026#34;); 385 }) 386 .verifyComplete(); 387 } 388 389 /** 390 * ******************************************************************** 391 * distinct 392 * ******************************************************************** 393 */ 394 @Test 395 void test_distinct_flux() { 396 Flux\u0026lt;String\u0026gt; flux = Flux.fromIterable(List.of(\u0026#34;Jack\u0026#34;, \u0026#34;Joe\u0026#34;, \u0026#34;Jack\u0026#34;, \u0026#34;Jill\u0026#34;, \u0026#34;jack\u0026#34;)) 397 .map(String::toUpperCase) 398 .distinct(); 399 flux.subscribe(System.out::println); 400 StepVerifier.create(flux) 401 .expectNext(\u0026#34;JACK\u0026#34;, \u0026#34;JOE\u0026#34;, \u0026#34;JILL\u0026#34;) 402 .verifyComplete(); 403 } 404 405 /** 406 * ******************************************************************** 407 * concatMap - works only on flux, same as flatMap but order is preserved, concatMap takes more time but ordering is preserved. 408 * flatMap - Takes less time but ordering is lost. 
409 * ******************************************************************** 410 */ 411 @Test 412 @SneakyThrows 413 void test_concatMap() { 414 Flux flux1 = Flux.just(\u0026#34;jack\u0026#34;, \u0026#34;raj\u0026#34;) 415 .concatMap(HelperUtil::capitalizeReactive); 416 flux1.subscribe(System.out::println); 417 //Guarantee of order, jack will come first then raj. 418 StepVerifier.create(flux1) 419 .expectSubscription() 420 .expectNext(\u0026#34;JACK\u0026#34;, \u0026#34;RAJ\u0026#34;) 421 .verifyComplete(); 422 423 Flux flux2 = Flux.fromIterable(getCustomers()) 424 .concatMap(HelperUtil::capitalizeCustomerName); 425 flux2.subscribe(System.out::println); 426 StepVerifier.create(flux2) 427 .expectSubscription() 428 .expectNextCount(5) 429 .verifyComplete(); 430 } 431 432 /** 433 * ******************************************************************** 434 * flatMapMany - similar to flatMap but flattens the flux 435 * ******************************************************************** 436 */ 437 @Test 438 void test_flatMapMany() { 439 Flux\u0026lt;String\u0026gt; flux1 = Mono.just(\u0026#34;the quick brown fox jumps over the lazy dog\u0026#34;) 440 .flatMapMany(e -\u0026gt; Flux.fromArray(e.toUpperCase().split(\u0026#34;\u0026#34;))) 441 .distinct() 442 .sort(); 443 flux1.subscribe(System.out::println); 444 //26 letters in the alphabet 445 StepVerifier.create(flux1) 446 .expectNextCount(26) 447 .expectComplete(); 448 449 Flux\u0026lt;Integer\u0026gt; flux2 = Mono.just(List.of(1, 2, 3)) 450 .flatMapMany(it -\u0026gt; Flux.fromIterable(it)); 451 flux2.subscribe(System.out::println); 452 StepVerifier 453 .create(flux2) 454 .expectNext(1, 2, 3) 455 .verifyComplete(); 456 457 Flux flux3 = Mono.just(getCustomers()) 458 .flatMapMany(e -\u0026gt; HelperUtil.capitalizeCustomerName(e)); 459 flux3.subscribe(System.out::println); 460 StepVerifier.create(flux3) 461 .expectSubscription() 462 .expectNextCount(5) 463 .verifyComplete(); 464 } 465 466 /** 467 * 
******************************************************************** 468 * flatMapIterable - convert mono of list to flux 469 * ******************************************************************** 470 */ 471 @Test 472 void test_flatMapIterable() { 473 Mono\u0026lt;List\u0026lt;Integer\u0026gt;\u0026gt; mono = Mono.just(Arrays.asList(1, 2, 3)); 474 Flux\u0026lt;Integer\u0026gt; flux = mono.flatMapIterable(list -\u0026gt; list); 475 flux.subscribe(System.out::println); 476 StepVerifier 477 .create(flux) 478 .expectNext(1, 2, 3) 479 .verifyComplete(); 480 } 481 482 /** 483 * ******************************************************************** 484 * flatMapIterable - convert mono of map to flux 485 * ******************************************************************** 486 */ 487 @Test 488 void test_flatMapIterable2() { 489 Mono\u0026lt;Map\u0026lt;String, String\u0026gt;\u0026gt; mono = Mono.just(Map.of(\u0026#34;foo\u0026#34;, \u0026#34;bar\u0026#34;)); 490 Flux\u0026lt;Map.Entry\u0026lt;String, String\u0026gt;\u0026gt; flux = mono.flatMapIterable(list -\u0026gt; list.entrySet()); 491 flux.subscribe(System.out::println); 492 StepVerifier 493 .create(flux) 494 .expectNext(Map.entry(\u0026#34;foo\u0026#34;, \u0026#34;bar\u0026#34;)) 495 .verifyComplete(); 496 } 497 498 /** 499 * ******************************************************************** 500 * transform - accepts a Function functional interface. 
Used when similar transform is used in many places 501 * input is flux/mono 502 * output is flux/mono 503 * takes a flux/mono and returns a flux/mono 504 * ******************************************************************** 505 */ 506 @Test 507 void test_transform() { 508 //Function defines input and output 509 Function\u0026lt;Flux\u0026lt;String\u0026gt;, Flux\u0026lt;String\u0026gt;\u0026gt; upperCaseFunction = name -\u0026gt; name.map(String::toUpperCase); 510 Flux\u0026lt;String\u0026gt; flux = Flux.fromIterable(List.of(\u0026#34;Jack\u0026#34;, \u0026#34;Joe\u0026#34;)) 511 .transform(upperCaseFunction); 512 flux.subscribe(System.out::println); 513 StepVerifier 514 .create(flux) 515 .expectNext(\u0026#34;JACK\u0026#34;, \u0026#34;JOE\u0026#34;) 516 .verifyComplete(); 517 } 518 519 /** 520 * ******************************************************************** 521 * switchIfEmpty - similar to defaultIfEmpty but return flux/mono 522 * defaultIfEmpty - return a fixed value. 523 * ******************************************************************** 524 */ 525 @Test 526 @SneakyThrows 527 void test_defaultIfEmpty() { 528 Flux\u0026lt;Object\u0026gt; flux1 = Flux.empty() 529 .defaultIfEmpty(\u0026#34;empty\u0026#34;) 530 .log(); 531 StepVerifier.create(flux1) 532 .expectNext(\u0026#34;empty\u0026#34;) 533 .expectComplete() 534 .verify(); 535 536 Flux\u0026lt;Object\u0026gt; flux2 = Flux.empty() 537 .switchIfEmpty(Flux.just(\u0026#34;empty\u0026#34;)) 538 .log(); 539 StepVerifier.create(flux2) 540 .expectNext(\u0026#34;empty\u0026#34;) 541 .expectComplete() 542 .verify(); 543 } 544 545 @Test 546 void test_optional() { 547 var mono1 = getHello(true) 548 .defaultIfEmpty(\u0026#34;NONE\u0026#34;); 549 StepVerifier.create(mono1) 550 .expectNext(\u0026#34;HELLO\u0026#34;) 551 .verifyComplete(); 552 553 var mono2 = getHello(false) 554 .defaultIfEmpty(\u0026#34;NONE\u0026#34;); 555 StepVerifier.create(mono2) 556 .expectNext(\u0026#34;NONE\u0026#34;) 557 .verifyComplete(); 
558 559 var mono3 = getOptionalHello(true) 560 .filter(Optional::isPresent) 561 .map(Optional::get); 562 StepVerifier.create(mono3) 563 .expectNext(\u0026#34;HELLO\u0026#34;) 564 .verifyComplete(); 565 566 var mono4 = getOptionalHello(false) 567 .filter(Optional::isPresent) 568 .map(Optional::get); 569 StepVerifier.create(mono4) 570 .expectNextCount(0) 571 .verifyComplete(); 572 } 573 574 private Mono\u0026lt;String\u0026gt; getHello(Boolean flag) { 575 if (flag) { 576 return Mono.just(\u0026#34;HELLO\u0026#34;); 577 } else { 578 return Mono.empty(); 579 } 580 } 581 582 private Mono\u0026lt;Optional\u0026lt;String\u0026gt;\u0026gt; getOptionalHello(Boolean flag) { 583 if (flag) { 584 return Mono.just(Optional.of(\u0026#34;HELLO\u0026#34;)); 585 } else { 586 return Mono.just(Optional.empty()); 587 } 588 } 589 590 /** 591 * ******************************************************************** 592 * switchIfEmpty with Optional 593 * ******************************************************************** 594 */ 595 @Test 596 public void test_switchIfEmpty() { 597 Mono\u0026lt;Optional\u0026lt;Customer\u0026gt;\u0026gt; c1 = Mono.justOrEmpty(Optional.empty()); 598 Mono\u0026lt;Optional\u0026lt;Customer\u0026gt;\u0026gt; c2 = Mono.just(Optional.of(getCustomer())); 599 600 Mono\u0026lt;Optional\u0026lt;Customer\u0026gt;\u0026gt; mono1 = c1 601 .switchIfEmpty(Mono.just(Optional.of(new Customer()))); 602 StepVerifier.create(mono1) 603 .expectNextCount(1) 604 .expectComplete() 605 .verify(); 606 607 Mono\u0026lt;Optional\u0026lt;Customer\u0026gt;\u0026gt; mono2 = c2 608 .switchIfEmpty(Mono.just(Optional.empty())); 609 StepVerifier.create(mono2) 610 .expectNextCount(1) 611 .expectComplete() 612 .verify(); 613 } 614 615 /** 616 * ******************************************************************** 617 * switchIfEmpty - Used as if-else block 618 * ******************************************************************** 619 */ 620 @Test 621 void test_switchIfEmpty_if_else() { 622 final 
Customer customer = getCustomer(); 623 //No need to use Mono.defer on the switchIfEmpty 624 Mono\u0026lt;String\u0026gt; mono = Mono.just(customer) 625 .flatMap(e -\u0026gt; { 626 if (customer.getCity().equals(\u0026#34;bangalore\u0026#34;)) { 627 return Mono.just(\u0026#34;Timezone:IST\u0026#34;); 628 } else { 629 return Mono.empty(); 630 } 631 }) 632 .switchIfEmpty(Mono.just(\u0026#34;Timezone:GMT\u0026#34;)); 633 634 StepVerifier.create(mono) 635 .expectNext(\u0026#34;Timezone:GMT\u0026#34;) 636 .verifyComplete(); 637 } 638 639 /** 640 * ******************************************************************** 641 * filterWhen - returns Mono 642 * filter - returns object 643 * ******************************************************************** 644 */ 645 @Test 646 void test_filterWhen() { 647 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;pumpkin\u0026#34;, \u0026#34;papaya\u0026#34;, \u0026#34;walnuts\u0026#34;, \u0026#34;grapes\u0026#34;, \u0026#34;pineapple\u0026#34;).cache(); 648 Flux\u0026lt;String\u0026gt; flux2 = flux1.filterWhen(f -\u0026gt; Mono.just(f.equals(\u0026#34;apple\u0026#34;))); 649 flux2.subscribe(System.out::println); 650 StepVerifier.create(flux2) 651 .expectNext(\u0026#34;apple\u0026#34;) 652 .verifyComplete(); 653 } 654 655 /** 656 * ******************************************************************** 657 * filterWhen - returns Mono 658 * filter - returns object 659 * ******************************************************************** 660 */ 661 @Test 662 void test_filter() { 663 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;pumpkin\u0026#34;, \u0026#34;papaya\u0026#34;, \u0026#34;walnuts\u0026#34;, \u0026#34;grapes\u0026#34;, \u0026#34;pineapple\u0026#34;).cache(); 664 Flux\u0026lt;String\u0026gt; flux2 = flux1.filter(f -\u0026gt; f.equals(\u0026#34;apple\u0026#34;)); 665 flux2.subscribe(System.out::println); 
666 StepVerifier.create(flux2) 667 .expectNext(\u0026#34;apple\u0026#34;) 668 .verifyComplete(); 669 670 //Get even numbers 671 Flux flux = Flux.just(1, 2, 3, 4, 5) 672 .filter(i -\u0026gt; i % 2 == 0); 673 flux.subscribe(System.out::println); 674 StepVerifier.create(flux) 675 .expectNext(2, 4) 676 .verifyComplete(); 677 } 678 679 /** 680 * ******************************************************************** 681 * intersect (common) - compare 2 flux for common elements 682 * ******************************************************************** 683 */ 684 @Test 685 void test_intersect_inefficient() { 686 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;banana\u0026#34;); 687 //Without cache on flux2 it will subscribe many times. 688 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;pumpkin\u0026#34;, \u0026#34;papaya\u0026#34;, \u0026#34;walnuts\u0026#34;, \u0026#34;grapes\u0026#34;, \u0026#34;pineapple\u0026#34;).cache(); 689 Flux\u0026lt;String\u0026gt; commonFlux = flux1.filter(f -\u0026gt; { 690 //toStream will block so should be avoided. 691 //Inefficient - toStream will block so should be avoided. 692 //Not for live stream or stream that can be subscribed only once. 
693 return flux2.toStream().anyMatch(e -\u0026gt; e.equals(f)); 694 }); 695 commonFlux.subscribe(System.out::println); 696 StepVerifier.create(commonFlux) 697 .expectNext(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;) 698 .verifyComplete(); 699 } 700 701 /** 702 * ******************************************************************** 703 * intersect (common) - compare 2 flux for common elements 704 * ******************************************************************** 705 */ 706 @Test 707 void test_intersect_efficient_1() { 708 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;banana\u0026#34;); 709 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;pumpkin\u0026#34;, \u0026#34;papaya\u0026#34;, \u0026#34;walnuts\u0026#34;, \u0026#34;grapes\u0026#34;, \u0026#34;pineapple\u0026#34;); 710 Flux\u0026lt;String\u0026gt; commonFlux = flux1 711 .collect(Collectors.toSet()) 712 .flatMapMany(set -\u0026gt; { 713 return flux2 714 //Filter out matching 715 //Limitation is that you can only compare 1 value collected in set. 
716 .filter(t -\u0026gt; set.contains(t)); 717 }); 718 commonFlux.subscribe(System.out::println); 719 StepVerifier.create(commonFlux) 720 .expectNext(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;) 721 .verifyComplete(); 722 } 723 724 /** 725 * ******************************************************************** 726 * intersect (common) - using join operator 727 * ******************************************************************** 728 */ 729 @Test 730 void test_intersect_efficient_2() { 731 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;banana\u0026#34;); 732 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;pumpkin\u0026#34;, \u0026#34;papaya\u0026#34;, \u0026#34;walnuts\u0026#34;, \u0026#34;grapes\u0026#34;, \u0026#34;pineapple\u0026#34;); 733 734 Flux\u0026lt;String\u0026gt; commonFlux = flux1.join(flux2, s -\u0026gt; Flux.never(), s -\u0026gt; Flux.never(), Tuples::of) 735 //Filter out matching 736 .filter(t -\u0026gt; t.getT1().equals(t.getT2())) 737 //Revert to single value 738 .map(Tuple2::getT1) 739 //Remove duplicates, if any 740 .groupBy(f -\u0026gt; f) 741 .map(GroupedFlux::key); 742 commonFlux.subscribe(System.out::println); 743 StepVerifier.create(commonFlux) 744 .expectNext(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;) 745 .verifyComplete(); 746 } 747 748 @Test 749 void test_intersect_efficient_3() { 750 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;banana\u0026#34;); 751 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;pumpkin\u0026#34;, \u0026#34;papaya\u0026#34;, \u0026#34;walnuts\u0026#34;, \u0026#34;grapes\u0026#34;, \u0026#34;pineapple\u0026#34;); 752 753 Mono\u0026lt;List\u0026lt;String\u0026gt;\u0026gt; monoList1 = flux1.collectList(); 754 
Mono\u0026lt;List\u0026lt;String\u0026gt;\u0026gt; monoList2 = flux2.collectList(); 755 756 Flux\u0026lt;String\u0026gt; commonFlux = Mono.zip(monoList1, monoList2) 757 .map(tuple -\u0026gt; { 758 List\u0026lt;String\u0026gt; list1 = tuple.getT1(); 759 List\u0026lt;String\u0026gt; list2 = tuple.getT2(); 760 list1.retainAll(list2); 761 return list1; 762 }).flatMapIterable(e -\u0026gt; e); 763 764 commonFlux.subscribe(System.out::println); 765 StepVerifier.create(commonFlux) 766 .expectNext(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;) 767 .verifyComplete(); 768 } 769 770 @Test 771 void test_intersect_efficient_4() { 772 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;banana\u0026#34;); 773 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;pumpkin\u0026#34;, \u0026#34;papaya\u0026#34;, \u0026#34;walnuts\u0026#34;, \u0026#34;grapes\u0026#34;, \u0026#34;pineapple\u0026#34;); 774 775 Flux\u0026lt;String\u0026gt; commonFlux = flux2.filterWhen(element -\u0026gt; 776 flux1.any(e -\u0026gt; e.equals(element)) 777 ); 778 779 commonFlux.subscribe(System.out::println); 780 StepVerifier.create(commonFlux) 781 .expectNext(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;) 782 .verifyComplete(); 783 } 784 785 @Test 786 void test_intersect_efficient_5() { 787 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;banana\u0026#34;); 788 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;pumpkin\u0026#34;, \u0026#34;papaya\u0026#34;, \u0026#34;walnuts\u0026#34;, \u0026#34;grapes\u0026#34;, \u0026#34;pineapple\u0026#34;); 789 790 Flux\u0026lt;String\u0026gt; commonFlux = flux2.concatMap(element2 -\u0026gt; 791 flux1.filter(element1 -\u0026gt; element1.equals(element2)).take(1) 792 ); 793 commonFlux.subscribe(System.out::println); 
794 StepVerifier.create(commonFlux) 795 .expectNext(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;) 796 .verifyComplete(); 797 } 798 799 @Test 800 void test_intersect_efficient_6() { 801 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;banana\u0026#34;); 802 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;pumpkin\u0026#34;, \u0026#34;papaya\u0026#34;, \u0026#34;walnuts\u0026#34;, \u0026#34;grapes\u0026#34;, \u0026#34;pineapple\u0026#34;); 803 804 Flux\u0026lt;String\u0026gt; commonFlux = flux2.flatMap(element2 -\u0026gt; 805 flux1.flatMap(element1 -\u0026gt; 806 element1.equals(element2) ? Flux.just(element1) : Flux.empty() 807 ).take(1) 808 ); 809 commonFlux.subscribe(System.out::println); 810 StepVerifier.create(commonFlux) 811 .expectNext(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;) 812 .verifyComplete(); 813 } 814 815 /** 816 * ******************************************************************** 817 * startWith - add new element to flux. 818 * ******************************************************************** 819 */ 820 @Test 821 public void test_startWith() { 822 Flux\u0026lt;Integer\u0026gt; flux1 = Flux.range(1, 3); 823 Flux\u0026lt;Integer\u0026gt; flux2 = flux1.startWith(0); 824 StepVerifier.create(flux2) 825 .expectNext(0, 1, 2, 3) 826 .verifyComplete(); 827 } 828 829 /** 830 * ******************************************************************** 831 * index 832 * ******************************************************************** 833 */ 834 @Test 835 void test_index() { 836 //append a number to each element. 
837 Flux\u0026lt;Tuple2\u0026lt;Long, String\u0026gt;\u0026gt; flux = Flux 838 .just(\u0026#34;apple\u0026#34;, \u0026#34;banana\u0026#34;, \u0026#34;orange\u0026#34;) 839 .index(); 840 StepVerifier.create(flux) 841 .expectNext(Tuples.of(0L, \u0026#34;apple\u0026#34;)) 842 .expectNext(Tuples.of(1L, \u0026#34;banana\u0026#34;)) 843 .expectNext(Tuples.of(2L, \u0026#34;orange\u0026#34;)) 844 .verifyComplete(); 845 } 846 847 /** 848 * ******************************************************************** 849 * takeWhile 850 * ******************************************************************** 851 */ 852 @Test 853 void test_takeWhile() { 854 Flux\u0026lt;Integer\u0026gt; flux = Flux.range(1, 10); 855 Flux\u0026lt;Integer\u0026gt; takeWhile = flux.takeWhile(i -\u0026gt; i \u0026lt;= 5); 856 StepVerifier 857 .create(takeWhile) 858 .expectNext(1, 2, 3, 4, 5) 859 .verifyComplete(); 860 } 861 862 /** 863 * ******************************************************************** 864 * skipWhile 865 * ******************************************************************** 866 */ 867 @Test 868 void test_skipWhile() { 869 Flux\u0026lt;Integer\u0026gt; flux = Flux.range(1, 10); 870 Flux\u0026lt;Integer\u0026gt; skipWhile = flux.skipWhile(i -\u0026gt; i \u0026lt;= 5); 871 StepVerifier 872 .create(skipWhile) 873 .expectNext(6, 7, 8, 9, 10) 874 .verifyComplete(); 875 } 876 877 /** 878 * ******************************************************************** 879 * collectList - flux to mono of list 880 * ******************************************************************** 881 */ 882 @Test 883 void test_collectList() { 884 Mono\u0026lt;List\u0026lt;Integer\u0026gt;\u0026gt; flux = Flux 885 .just(1, 2, 3) 886 .collectList(); 887 StepVerifier.create(flux) 888 .expectNext(Arrays.asList(1, 2, 3)) 889 .verifyComplete(); 890 } 891 892 /** 893 * ******************************************************************** 894 * collectSortedList- flux to mono of list 895 * 
******************************************************************** 896 */ 897 @Test 898 void test_collectSortedList() { 899 Mono\u0026lt;List\u0026lt;Integer\u0026gt;\u0026gt; listMono2 = Flux 900 .just(5, 2, 4, 1, 3) 901 .collectSortedList(); 902 StepVerifier.create(listMono2) 903 .expectNext(Arrays.asList(1, 2, 3, 4, 5)) 904 .verifyComplete(); 905 } 906 907 /** 908 * ******************************************************************** 909 * collectMap 910 * ******************************************************************** 911 */ 912 @Test 913 void test_collectMap() { 914 Mono\u0026lt;Map\u0026lt;Object, Object\u0026gt;\u0026gt; flux = Flux.just(\u0026#34;yellow:banana\u0026#34;, \u0026#34;red:apple\u0026#34;) 915 .map(item -\u0026gt; item.split(\u0026#34;:\u0026#34;)) 916 .collectMap(item -\u0026gt; item[0], item -\u0026gt; item[1]); 917 918 Map\u0026lt;Object, Object\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); 919 flux.subscribe(map::putAll); 920 map.forEach((key, value) -\u0026gt; System.out.println(key + \u0026#34; -\u0026gt; \u0026#34; + value)); 921 922 StepVerifier.create(flux) 923 .expectNext(Map.of(\u0026#34;yellow\u0026#34;, \u0026#34;banana\u0026#34;, \u0026#34;red\u0026#34;, \u0026#34;apple\u0026#34;)) 924 .verifyComplete(); 925 } 926 927 /** 928 * ******************************************************************** 929 * collectMultimap 930 * ******************************************************************** 931 */ 932 @Test 933 void test_collectMultimap() { 934 Mono\u0026lt;Map\u0026lt;String, Collection\u0026lt;String\u0026gt;\u0026gt;\u0026gt; flux = Flux.just(\u0026#34;yellow:banana\u0026#34;, \u0026#34;red:grapes\u0026#34;, \u0026#34;red:apple\u0026#34;, \u0026#34;yellow:pineapple\u0026#34;) 935 .map(item -\u0026gt; item.split(\u0026#34;:\u0026#34;)) 936 .collectMultimap( 937 item -\u0026gt; item[0], 938 item -\u0026gt; item[1]); 939 Map\u0026lt;Object, Collection\u0026lt;String\u0026gt;\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); 
940 flux.subscribe(map::putAll); 941 map.forEach((key, value) -\u0026gt; System.out.println(key + \u0026#34; -\u0026gt; \u0026#34; + value)); 942 943 StepVerifier.create(flux) 944 .expectNext(Map.of(\u0026#34;red\u0026#34;, List.of(\u0026#34;grapes\u0026#34;, \u0026#34;apple\u0026#34;), \u0026#34;yellow\u0026#34;, List.of(\u0026#34;banana\u0026#34;, \u0026#34;pineapple\u0026#34;))) 945 .verifyComplete(); 946 } 947 948 /** 949 * ******************************************************************** 950 * concat - subscribes to publishers in sequence, order guaranteed, static function 951 * concatWith - subscribes to publishers in sequence, order guaranteed, instance function 952 * ******************************************************************** 953 */ 954 @Test 955 @SneakyThrows 956 void test_concat() { 957 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;); 958 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;c\u0026#34;, \u0026#34;d\u0026#34;); 959 Flux\u0026lt;String\u0026gt; flux3 = Flux.concat(flux1, flux2); 960 961 StepVerifier.create(flux3) 962 .expectSubscription() 963 .expectNext(\u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;, \u0026#34;c\u0026#34;, \u0026#34;d\u0026#34;) 964 .verifyComplete(); 965 966 Flux\u0026lt;String\u0026gt; flux4 = Flux.just(\u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;).delayElements(Duration.ofMillis(200)); 967 Flux\u0026lt;String\u0026gt; flux5 = Flux.just(\u0026#34;c\u0026#34;, \u0026#34;d\u0026#34;); 968 //Lazy will wait till first flux finishes. 
969 Flux\u0026lt;String\u0026gt; flux6 = Flux.concat(flux4, flux5).log(); 970 971 StepVerifier.create(flux6) 972 .expectSubscription() 973 .expectNext(\u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;, \u0026#34;c\u0026#34;, \u0026#34;d\u0026#34;) 974 .verifyComplete(); 975 } 976 977 /** 978 * ******************************************************************** 979 * concat - subscribes to publishers in sequence, order guaranteed, static function 980 * concatWith - subscribes to publishers in sequence, order guaranteed, instance function 981 * ******************************************************************** 982 */ 983 @Test 984 @SneakyThrows 985 void test_concatWith() { 986 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;); 987 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;c\u0026#34;, \u0026#34;d\u0026#34;); 988 Flux\u0026lt;String\u0026gt; flux3 = flux1.concatWith(flux2); 989 StepVerifier.create(flux3) 990 .expectSubscription() 991 .expectNext(\u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;, \u0026#34;c\u0026#34;, \u0026#34;d\u0026#34;) 992 .verifyComplete(); 993 994 Mono\u0026lt;String\u0026gt; aFlux = Mono.just(\u0026#34;a\u0026#34;); 995 Mono\u0026lt;String\u0026gt; bFlux = Mono.just(\u0026#34;b\u0026#34;); 996 Flux\u0026lt;String\u0026gt; stringFlux = aFlux.concatWith(bFlux); 997 stringFlux.subscribe(System.out::println); 998 StepVerifier.create(stringFlux) 999 .expectNext(\u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;) 1000 .verifyComplete(); 1001 } 1002 1003 /** 1004 * ******************************************************************** 1005 * concatDelayError - When one flux can throw an error 1006 * ******************************************************************** 1007 */ 1008 @Test 1009 void test_concatDelayError() { 1010 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;, \u0026#34;c\u0026#34;) 1011 .map(s -\u0026gt; { 1012 if (s.equals(\u0026#34;b\u0026#34;)) { 
1013 throw new RuntimeException(\u0026#34;error!\u0026#34;); 1014 } 1015 return s; 1016 }); 1017 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;d\u0026#34;, \u0026#34;e\u0026#34;, \u0026#34;f\u0026#34;); 1018 Flux\u0026lt;String\u0026gt; flux3 = Flux.concatDelayError(flux1, flux2); 1019 1020 StepVerifier.create(flux3) 1021 .expectSubscription() 1022 .expectNext(\u0026#34;a\u0026#34;, \u0026#34;d\u0026#34;, \u0026#34;e\u0026#34;, \u0026#34;f\u0026#34;) 1023 .expectError() 1024 .verify(); 1025 } 1026 1027 /** 1028 * ******************************************************************** 1029 * combineLatest - will change order based on time. Rarely used. 1030 * ******************************************************************** 1031 */ 1032 @Test 1033 void test_combineLatest() { 1034 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;); 1035 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;c\u0026#34;, \u0026#34;d\u0026#34;); 1036 Flux\u0026lt;String\u0026gt; flux3 = Flux.combineLatest(flux1, flux2, (s1, s2) -\u0026gt; s1 + s2) 1037 .log(); 1038 StepVerifier.create(flux3) 1039 .expectSubscription() 1040 .expectNext(\u0026#34;bc\u0026#34;, \u0026#34;bd\u0026#34;) 1041 .verifyComplete(); 1042 } 1043 1044 /** 1045 * ******************************************************************** 1046 * merge - subscribes to publishers eagerly, order not guaranteed, static function 1047 * ******************************************************************** 1048 */ 1049 @Test 1050 @SneakyThrows 1051 void test_merge() { 1052 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;).delayElements(Duration.ofMillis(200)); 1053 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;c\u0026#34;, \u0026#34;d\u0026#34;); 1054 //Eager will not wait till first flux3 finishes. 
1055 Flux\u0026lt;String\u0026gt; flux3 = Flux.merge(flux1, flux2); 1056 1057 StepVerifier.create(flux3) 1058 .expectSubscription() 1059 .expectNext(\u0026#34;c\u0026#34;, \u0026#34;d\u0026#34;, \u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;) 1060 .verifyComplete(); 1061 } 1062 1063 /** 1064 * ******************************************************************** 1065 * mergeWith - subscribes to publishers in eagerly, order not guaranteed, instance function 1066 * ******************************************************************** 1067 */ 1068 @Test 1069 @SneakyThrows 1070 void test_mergeWith() { 1071 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;).delayElements(Duration.ofMillis(200)); 1072 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;c\u0026#34;, \u0026#34;d\u0026#34;); 1073 //Eager will not wait till first flux finishes. 1074 Flux\u0026lt;String\u0026gt; flux3 = flux1.mergeWith(flux2); 1075 1076 StepVerifier.create(flux3) 1077 .expectSubscription() 1078 .expectNext(\u0026#34;c\u0026#34;, \u0026#34;d\u0026#34;, \u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;) 1079 .verifyComplete(); 1080 1081 Mono aMono = Mono.just(\u0026#34;a\u0026#34;); 1082 Mono bMono = Mono.just(\u0026#34;b\u0026#34;); 1083 Flux flux4 = aMono.mergeWith(bMono); 1084 StepVerifier.create(flux4) 1085 .expectNext(\u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;) 1086 .verifyComplete(); 1087 } 1088 1089 /** 1090 * ******************************************************************** 1091 * mergeSequential - subscribes to publishers eagerly, result is sequential. 1092 * concat - subscribes to publishers in sequence, result is sequential. 
1093 * ******************************************************************** 1094 */ 1095 @Test 1096 @SneakyThrows 1097 void test_mergeSequential() { 1098 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;).delayElements(Duration.ofMillis(200)); 1099 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;c\u0026#34;, \u0026#34;d\u0026#34;); 1100 Flux\u0026lt;String\u0026gt; flux3 = Flux.mergeSequential(flux1, flux2, flux1); 1101 1102 StepVerifier.create(flux3) 1103 .expectSubscription() 1104 .expectNext(\u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;, \u0026#34;c\u0026#34;, \u0026#34;d\u0026#34;, \u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;) 1105 .verifyComplete(); 1106 } 1107 1108 /** 1109 * ******************************************************************** 1110 * mergeDelayError - when one flux can throw an error 1111 * ******************************************************************** 1112 */ 1113 @Test 1114 void test_mergeDelayError() { 1115 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;) 1116 .map(s -\u0026gt; { 1117 if (s.equals(\u0026#34;b\u0026#34;)) { 1118 throw new RuntimeException(\u0026#34;error\u0026#34;); 1119 } 1120 return s; 1121 }).doOnError(e -\u0026gt; log.error(\u0026#34;Error: {}\u0026#34;, e)); 1122 1123 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;c\u0026#34;, \u0026#34;d\u0026#34;); 1124 Flux\u0026lt;String\u0026gt; flux3 = Flux.mergeDelayError(1, flux1, flux2, flux1); 1125 1126 StepVerifier.create(flux3) 1127 .expectSubscription() 1128 .expectNext(\u0026#34;a\u0026#34;, \u0026#34;c\u0026#34;, \u0026#34;d\u0026#34;, \u0026#34;a\u0026#34;) 1129 .expectError() 1130 .verify(); 1131 } 1132 1133 /** 1134 * ******************************************************************** 1135 * zip - subscribes to publishers in eagerly, waits for both flux to emit one element. 
1136 * 2-8 flux can be zipped, returns a tuple, Static function 1137 * ******************************************************************** 1138 */ 1139 @Test 1140 void test_zip() { 1141 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;red\u0026#34;, \u0026#34;yellow\u0026#34;); 1142 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;banana\u0026#34;); 1143 Flux\u0026lt;String\u0026gt; flux3 = Flux.zip(flux1, flux2) 1144 .map(tuple -\u0026gt; { 1145 return (tuple.getT1() + \u0026#34; \u0026#34; + tuple.getT2()); 1146 }); 1147 flux3.subscribe(System.out::println); 1148 StepVerifier.create(flux3) 1149 .expectNext(\u0026#34;red apple\u0026#34;) 1150 .expectNext(\u0026#34;yellow banana\u0026#34;) 1151 .verifyComplete(); 1152 1153 //Third argument is combinator lambda 1154 Flux\u0026lt;Integer\u0026gt; firstFlux = Flux.just(1, 2, 3); 1155 Flux\u0026lt;Integer\u0026gt; secondFlux = Flux.just(10, 20, 30, 40); 1156 //Define how the zip should happen 1157 Flux\u0026lt;Integer\u0026gt; zip = Flux.zip(firstFlux, secondFlux, (num1, num2) -\u0026gt; num1 + num2); 1158 StepVerifier 1159 .create(zip) 1160 .expectNext(11, 22, 33) 1161 .verifyComplete(); 1162 } 1163 1164 /** 1165 * ******************************************************************** 1166 * zipWith - subscribes to publishers in eagerly, waits for both flux to emit one element. 1167 * 2-8 flux can be zipped, returns a tuple, Instance function 1168 * When 2 different size flux are combined zipWith return the smaller item size new flux. 
1169 * ******************************************************************** 1170 */ 1171 @Test 1172 void test_zipWith() { 1173 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;red\u0026#34;, \u0026#34;yellow\u0026#34;); 1174 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;banana\u0026#34;); 1175 Flux\u0026lt;String\u0026gt; flux3 = flux1.zipWith(flux2) 1176 .map(tuple -\u0026gt; { 1177 return (tuple.getT1() + \u0026#34; \u0026#34; + tuple.getT2()); 1178 }); 1179 StepVerifier.create(flux3) 1180 .expectNext(\u0026#34;red apple\u0026#34;) 1181 .expectNext(\u0026#34;yellow banana\u0026#34;) 1182 .verifyComplete(); 1183 1184 Flux\u0026lt;String\u0026gt; flux4 = Flux.fromIterable(Arrays.asList(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;banana\u0026#34;)) 1185 .zipWith(Flux.range(1, 5), (word, line) -\u0026gt; { 1186 return line + \u0026#34;. \u0026#34; + word; 1187 }); 1188 StepVerifier.create(flux4) 1189 .expectNext(\u0026#34;1. apple\u0026#34;) 1190 .expectNext(\u0026#34;2. orange\u0026#34;) 1191 .expectNext(\u0026#34;3. banana\u0026#34;) 1192 .verifyComplete(); 1193 } 1194 1195 /** 1196 * ******************************************************************** 1197 * Cant do zipWith to combine mono \u0026amp; flux. 
1198 * Use the operator join 1199 * ******************************************************************** 1200 */ 1201 @Test 1202 void test_join() { 1203 Mono\u0026lt;String\u0026gt; mono = Mono.just(\u0026#34;green\u0026#34;); 1204 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;banana\u0026#34;) 1205 .join(mono, s -\u0026gt; Flux.never(), s -\u0026gt; Flux.never(), Tuples::of) 1206 .flatMap(tuple -\u0026gt; { 1207 return Mono.just(tuple.getT2() + \u0026#34; \u0026#34; + tuple.getT1()); 1208 }); 1209 StepVerifier.create(flux1) 1210 .expectNext(\u0026#34;green apple\u0026#34;) 1211 .expectNext(\u0026#34;green banana\u0026#34;) 1212 .verifyComplete(); 1213 1214 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;banana\u0026#34;) 1215 .zipWith(mono.cache().repeat()) 1216 .flatMap(tuple -\u0026gt; { 1217 return Mono.just(tuple.getT2() + \u0026#34; \u0026#34; + tuple.getT1()); 1218 }); 1219 StepVerifier.create(flux2) 1220 .expectNext(\u0026#34;green apple\u0026#34;) 1221 .expectNext(\u0026#34;green banana\u0026#34;) 1222 .verifyComplete(); 1223 } 1224 1225 /** 1226 * ******************************************************************** 1227 * error 1228 * ******************************************************************** 1229 */ 1230 @Test 1231 void test_onError() { 1232 Mono\u0026lt;String\u0026gt; mono1 = Mono.just(\u0026#34;jack\u0026#34;) 1233 .map(s -\u0026gt; { 1234 throw new RuntimeException(\u0026#34;ERROR\u0026#34;); 1235 }); 1236 mono1.subscribe(s -\u0026gt; log.info(\u0026#34;name: {}\u0026#34;, s), Throwable::printStackTrace); 1237 StepVerifier.create(mono1) 1238 .expectError(RuntimeException.class) 1239 .verify(); 1240 1241 System.out.println(\u0026#34;********************************************************************\u0026#34;); 1242 1243 Mono\u0026lt;String\u0026gt; mono2 = Mono.just(\u0026#34;jack\u0026#34;) 1244 .flatMap(s -\u0026gt; { 1245 return Mono.error(new 
RuntimeException(\u0026#34;ERROR\u0026#34;)); 1246 }); 1247 mono2.subscribe(s -\u0026gt; log.info(\u0026#34;name: {}\u0026#34;, s), Throwable::printStackTrace); 1248 1249 StepVerifier.create(mono2) 1250 .expectError(RuntimeException.class) 1251 .verify(); 1252 } 1253 1254 /** 1255 * ******************************************************************** 1256 * Error Recover Handling 1257 * onErrorReturn - Return value on error 1258 * ******************************************************************** 1259 */ 1260 @Test 1261 void test_onErrorReturn() { 1262 Mono\u0026lt;Object\u0026gt; mono1 = Mono.error(new RuntimeException(\u0026#34;error\u0026#34;)) 1263 .onErrorReturn(\u0026#34;Jack\u0026#34;); 1264 StepVerifier.create(mono1) 1265 .expectNext(\u0026#34;Jack\u0026#34;) 1266 .verifyComplete(); 1267 } 1268 1269 /** 1270 * ******************************************************************** 1271 * Error Recover Handling 1272 * onErrorResume - Resume chain with new mono/flux. 1273 * ******************************************************************** 1274 */ 1275 @Test 1276 void test_onErrorResume() { 1277 Mono\u0026lt;Object\u0026gt; mono1 = Mono.error(new RuntimeException(\u0026#34;error\u0026#34;)) 1278 .onErrorResume(e -\u0026gt; Mono.just(\u0026#34;Jack\u0026#34;)); 1279 StepVerifier.create(mono1) 1280 .expectNext(\u0026#34;Jack\u0026#34;) 1281 .verifyComplete(); 1282 1283 Mono\u0026lt;Object\u0026gt; mono2 = Mono.error(new RuntimeException(\u0026#34;error\u0026#34;)) 1284 .onErrorResume(s -\u0026gt; { 1285 log.info(\u0026#34;Inside on onErrorResume\u0026#34;); 1286 return Mono.just(\u0026#34;Jack\u0026#34;); 1287 }) 1288 .log(); 1289 StepVerifier.create(mono2) 1290 .expectNext(\u0026#34;Jack\u0026#34;) 1291 .verifyComplete(); 1292 } 1293 1294 /** 1295 * ******************************************************************** 1296 * Error Recover Handling 1297 * onErrorContinue - Continue chain even if error occurs 1298 * 
******************************************************************** 1299 */ 1300 @Test 1301 void test_onErrorContinue() { 1302 Flux\u0026lt;String\u0026gt; flux = 1303 Flux.just(\u0026#34;a\u0026#34;, \u0026#34;b\u0026#34;, \u0026#34;c\u0026#34;) 1304 .map(e -\u0026gt; { 1305 if (e.equals(\u0026#34;b\u0026#34;)) { 1306 throw new RuntimeException(\u0026#34;error\u0026#34;); 1307 } 1308 return e; 1309 }) 1310 .concatWith(Mono.just(\u0026#34;d\u0026#34;)) 1311 .onErrorContinue((ex, value) -\u0026gt; { 1312 log.info(\u0026#34;Exception: {}\u0026#34;, ex); 1313 log.info(\u0026#34;value: {}\u0026#34;, value); 1314 }); 1315 StepVerifier.create(flux) 1316 .expectNext(\u0026#34;a\u0026#34;, \u0026#34;c\u0026#34;, \u0026#34;d\u0026#34;) 1317 .verifyComplete(); 1318 } 1319 1320 /** 1321 * ******************************************************************** 1322 * Error - Action 1323 * doOnError - log the error, Side-effect operator. 1324 * ******************************************************************** 1325 */ 1326 @Test 1327 void test_doOnError() { 1328 Mono\u0026lt;Object\u0026gt; mono1 = Mono.error(new RuntimeException(\u0026#34;error\u0026#34;)) 1329 .doOnError(e -\u0026gt; log.error(\u0026#34;Error: {}\u0026#34;, e.getMessage())) 1330 .log(); 1331 StepVerifier.create(mono1) 1332 .expectError(RuntimeException.class) 1333 .verify(); 1334 } 1335 1336 /** 1337 * ******************************************************************** 1338 * Error - Action 1339 * onErrorMap - Transform an error emitted 1340 * ******************************************************************** 1341 */ 1342 @Test 1343 void test_onErrorMap() { 1344 Flux flux = Flux.just(\u0026#34;Jack\u0026#34;, \u0026#34;Jill\u0026#34;) 1345 .map(u -\u0026gt; { 1346 if (u.equals(\u0026#34;Jill\u0026#34;)) { 1347 //always do throw here, never do return. 
1348 throw new IllegalArgumentException(\u0026#34;Not valid\u0026#34;); 1349 } 1350 if (u.equals(\u0026#34;Jack\u0026#34;)) { 1351 throw new ClassCastException(\u0026#34;Not valid\u0026#34;); 1352 } 1353 return u; 1354 }).onErrorMap(IllegalArgumentException.class, e -\u0026gt; { 1355 log.info(\u0026#34;Illegal Arg error\u0026#34;); 1356 throw new RuntimeException(\u0026#34;Illegal Arg error!\u0026#34;); 1357 }).onErrorMap(ClassCastException.class, e -\u0026gt; { 1358 log.info(\u0026#34;Class cast error\u0026#34;); 1359 throw new RuntimeException(\u0026#34;Class cast error!\u0026#34;); 1360 }); 1361 1362 StepVerifier.create(flux) 1363 .expectErrorMessage(\u0026#34;Class cast error!\u0026#34;) 1364 .verify(); 1365 } 1366 1367 /** 1368 * ******************************************************************** 1369 * retry 1370 * ******************************************************************** 1371 */ 1372 @Test 1373 void test_retry() { 1374 AtomicLong attemptCounter = new AtomicLong(); 1375 Mono\u0026lt;String\u0026gt; mono = Mono.just(\u0026#34;Jack\u0026#34;) 1376 .flatMap(n -\u0026gt; { 1377 return this.twoAttemptFunction(attemptCounter, n); 1378 }) 1379 .retry(3); 1380 StepVerifier.create(mono) 1381 .assertNext(e -\u0026gt; { 1382 assertThat(e).isEqualTo(\u0026#34;Hello Jack\u0026#34;); 1383 }) 1384 .verifyComplete(); 1385 } 1386 1387 private Mono\u0026lt;String\u0026gt; twoAttemptFunction(AtomicLong counter, String name) { 1388 Long attempt = counter.getAndIncrement(); 1389 log.info(\u0026#34;attempt value: {}\u0026#34;, attempt); 1390 if (attempt \u0026lt; 2) { 1391 throw new RuntimeException(\u0026#34;error\u0026#34;); 1392 } 1393 return Mono.just(\u0026#34;Hello \u0026#34; + name); 1394 } 1395 1396 /** 1397 * ******************************************************************** 1398 * retryWhen 1399 * ******************************************************************** 1400 */ 1401 @Test 1402 void test_retryWhen() { 1403 AtomicLong attemptCounter1 = new 
AtomicLong(); 1404 RetryBackoffSpec retryFilter1 = Retry.backoff(3, Duration.ofSeconds(1)) 1405 .filter(throwable -\u0026gt; throwable instanceof RuntimeException); 1406 1407 Mono\u0026lt;String\u0026gt; mono1 = Mono.just(\u0026#34;Jack\u0026#34;) 1408 .flatMap(e -\u0026gt; this.greetAfter2Failure(attemptCounter1, e)) 1409 .retryWhen(retryFilter1); 1410 StepVerifier.create(mono1) 1411 .assertNext(e -\u0026gt; { 1412 assertThat(e).isEqualTo(\u0026#34;Hello Jack\u0026#34;); 1413 }) 1414 .verifyComplete(); 1415 1416 AtomicLong attemptCounter2 = new AtomicLong(); 1417 RetryBackoffSpec retryFilter2 = Retry.fixedDelay(1, Duration.ofSeconds(1)) 1418 .filter(throwable -\u0026gt; throwable instanceof RuntimeException) 1419 .onRetryExhaustedThrow(((retryBackoffSpec, retrySignal) -\u0026gt; 1420 Exceptions.propagate(retrySignal.failure()) 1421 )); 1422 Mono\u0026lt;String\u0026gt; mono2 = Mono.just(\u0026#34;Jack\u0026#34;) 1423 .flatMap(e -\u0026gt; this.greetAfter2Failure(attemptCounter2, e)) 1424 .retryWhen(retryFilter2); 1425 StepVerifier.create(mono2) 1426 .expectErrorMessage(\u0026#34;error\u0026#34;) 1427 .verify(); 1428 } 1429 1430 private Mono\u0026lt;String\u0026gt; greetAfter2Failure(AtomicLong attemptCounter, String name) { 1431 Long attempt = attemptCounter.getAndIncrement(); 1432 log.info(\u0026#34;attempt value: {}\u0026#34;, attempt); 1433 if (attempt \u0026lt; 2) { 1434 throw new RuntimeException(\u0026#34;error\u0026#34;); 1435 } 1436 return Mono.just(\u0026#34;Hello \u0026#34; + name); 1437 } 1438 1439 /** 1440 * ******************************************************************** 1441 * repeat - repeat an operation n times. 
1442 * ******************************************************************** 1443 */ 1444 @Test 1445 void test_repeat() { 1446 Mono\u0026lt;List\u0026lt;String\u0026gt;\u0026gt; flux = Mono.defer(() -\u0026gt; { 1447 return Mono.just(\u0026#34;UUID \u0026#34; + UUID.randomUUID()); 1448 }) 1449 .repeat(5) 1450 .collectList(); 1451 flux.subscribe(System.out::println); 1452 1453 StepVerifier.create(flux) 1454 .assertNext(e -\u0026gt; { 1455 assertThat(e.size()).isEqualTo(6); 1456 }) 1457 .verifyComplete(); 1458 } 1459 1460 @Test 1461 void test_takeUntil() { 1462 AtomicLong counter = new AtomicLong(); 1463 Mono\u0026lt;List\u0026lt;String\u0026gt;\u0026gt; flux = Mono.defer(() -\u0026gt; { 1464 return Mono.just(\u0026#34;UUID \u0026#34; + UUID.randomUUID()); 1465 }) 1466 .repeat() 1467 .takeUntil(e -\u0026gt; { 1468 return counter.incrementAndGet() == 5; 1469 }) 1470 .collectList(); 1471 1472 StepVerifier.create(flux) 1473 .assertNext(e -\u0026gt; { 1474 assertThat(e.size()).isEqualTo(5); 1475 }) 1476 .verifyComplete(); 1477 } 1478 1479 /** 1480 * ******************************************************************** 1481 * Subscribe onComplete, onError 1482 * Never use this format of subscribe code, always use doOn operator 1483 * ******************************************************************** 1484 */ 1485 @Test 1486 void test_doOn() { 1487 Flux\u0026lt;Integer\u0026gt; numFlux = Flux.range(1, 5) 1488 .map(i -\u0026gt; { 1489 if (i == 4) { 1490 throw new RuntimeException(\u0026#34;error\u0026#34;); 1491 } 1492 return i; 1493 }); 1494 numFlux.subscribe(s -\u0026gt; { 1495 log.info(\u0026#34;Number: {}\u0026#34;, s); 1496 }, 1497 Throwable::printStackTrace, 1498 () -\u0026gt; { 1499 log.info(\u0026#34;Done!\u0026#34;); 1500 }); 1501 StepVerifier.create(numFlux) 1502 .expectNext(1, 2, 3) 1503 .expectError(RuntimeException.class) 1504 .verify(); 1505 } 1506 1507 /** 1508 * ******************************************************************** 1509 * doOn - doOnSubscribe, 
doOnNext, doOnError, doFinally, doOnComplete 1510 * ******************************************************************** 1511 */ 1512 @Test 1513 void test_test_doOn_2() { 1514 Flux\u0026lt;Object\u0026gt; flux = Flux.error(new RuntimeException(\u0026#34;error\u0026#34;)) 1515 .doOnSubscribe(s -\u0026gt; System.out.println(\u0026#34;Subscribed!\u0026#34;)) 1516 .doOnRequest(s -\u0026gt; System.out.println(\u0026#34;Requested!\u0026#34;)) 1517 .doOnNext(p -\u0026gt; System.out.println(\u0026#34;Next!\u0026#34;)) 1518 .doOnComplete(() -\u0026gt; System.out.println(\u0026#34;Completed!\u0026#34;)) 1519 .doFinally((e) -\u0026gt; System.out.println(\u0026#34;Signal: \u0026#34; + e)) 1520 .doOnError((e) -\u0026gt; System.out.println(\u0026#34;Error: \u0026#34; + e)); 1521 1522 StepVerifier.create(flux) 1523 .expectError(RuntimeException.class) 1524 .verify(); 1525 1526 StepVerifier.create(flux) 1527 .verifyError(RuntimeException.class); 1528 1529 Mono\u0026lt;Object\u0026gt; mono = Mono.error(new RuntimeException(\u0026#34;error\u0026#34;)) 1530 .doOnSubscribe(s -\u0026gt; System.out.println(\u0026#34;Subscribed!\u0026#34;)) 1531 .doOnRequest(s -\u0026gt; System.out.println(\u0026#34;Requested!\u0026#34;)) 1532 .doOnNext(p -\u0026gt; System.out.println(\u0026#34;Next!\u0026#34;)) 1533 .doFinally((e) -\u0026gt; System.out.println(\u0026#34;Signal: \u0026#34; + e)) 1534 .doOnError((e) -\u0026gt; System.out.println(\u0026#34;Error: \u0026#34; + e)) 1535 .doOnSuccess((e) -\u0026gt; System.out.println(\u0026#34;Success!\u0026#34;)); 1536 1537 StepVerifier.create(mono) 1538 .expectError(RuntimeException.class) 1539 .verify(); 1540 } 1541 1542 @Test 1543 void test_doOn_3() { 1544 Flux flux = Flux.error(new RuntimeException(\u0026#34;My Error\u0026#34;)); 1545 flux.subscribe( 1546 onNext(), 1547 onError(), 1548 onComplete() 1549 ); 1550 } 1551 1552 private Consumer\u0026lt;Object\u0026gt; onNext() { 1553 return o -\u0026gt; System.out.println(\u0026#34;Received : \u0026#34; + o); 
1554 } 1555 1556 private Consumer\u0026lt;Throwable\u0026gt; onError() { 1557 return e -\u0026gt; System.out.println(\u0026#34;ERROR : \u0026#34; + e.getMessage()); 1558 } 1559 1560 private Runnable onComplete() { 1561 return () -\u0026gt; System.out.println(\u0026#34;Completed\u0026#34;); 1562 } 1563 1564 /** 1565 * ******************************************************************** 1566 * StepVerifier - assertNext, thenRequest, thenCancel, expectError, expectErrorMessage 1567 * ******************************************************************** 1568 */ 1569 @Test 1570 void test_StepVerifier() { 1571 Flux flux1 = Flux.fromIterable(Arrays.asList(\u0026#34;Jack\u0026#34;, \u0026#34;Jill\u0026#34;)); 1572 StepVerifier.create(flux1) 1573 .expectNextMatches(user -\u0026gt; user.equals(\u0026#34;Jack\u0026#34;)) 1574 .assertNext(user -\u0026gt; assertThat(user).isEqualTo(\u0026#34;Jill\u0026#34;)) 1575 .verifyComplete(); 1576 1577 //Wait for 2 elements. 1578 StepVerifier.create(flux1) 1579 .expectNextCount(2) 1580 .verifyComplete(); 1581 1582 //Request 1 value at a time, get 2 values then cancel. 
1583 Flux flux2 = Flux.fromIterable(Arrays.asList(\u0026#34;Jack\u0026#34;, \u0026#34;Jill\u0026#34;, \u0026#34;Raj\u0026#34;)); 1584 StepVerifier.create(flux2, 1) 1585 .expectNext(\u0026#34;JACK\u0026#34;) 1586 .thenRequest(1) 1587 .expectNext(\u0026#34;JILL\u0026#34;) 1588 .thenCancel(); 1589 1590 Mono\u0026lt;Object\u0026gt; mono1 = Mono.error(new RuntimeException(\u0026#34;My Error\u0026#34;)); 1591 StepVerifier.create(mono1) 1592 .expectError(RuntimeException.class) 1593 .verify(); 1594 StepVerifier.create(mono1) 1595 .expectErrorMessage(\u0026#34;My Error\u0026#34;) 1596 .verify(); 1597 } 1598 1599 /** 1600 * ******************************************************************** 1601 * flux error propagate 1602 * ******************************************************************** 1603 */ 1604 @Test 1605 void test_propagate() { 1606 Flux flux = Flux.just(\u0026#34;Jack\u0026#34;, \u0026#34;Jill\u0026#34;) 1607 .map(u -\u0026gt; { 1608 try { 1609 return HelperUtil.checkName(u); 1610 } catch (HelperUtil.CustomException e) { 1611 throw Exceptions.propagate(e); 1612 } 1613 }); 1614 flux.subscribe(System.out::println); 1615 StepVerifier.create(flux) 1616 .expectNext(\u0026#34;JACK\u0026#34;) 1617 .verifyError(HelperUtil.CustomException.class); 1618 } 1619 1620 /** 1621 * ******************************************************************** 1622 * subscribeOn - influences upstream (whole chain) 1623 * ******************************************************************** 1624 */ 1625 @Test 1626 void test_subscribeOn() { 1627 Flux numbFlux = Flux.range(1, 5) 1628 .map(i -\u0026gt; { 1629 log.info(\u0026#34;Map1 Num: {}, Thread: {}\u0026#34;, i, Thread.currentThread().getName()); 1630 return i; 1631 }).subscribeOn(Schedulers.single()) 1632 .map(i -\u0026gt; { 1633 log.info(\u0026#34;Map2 Num: {}, Thread: {}\u0026#34;, i, Thread.currentThread().getName()); 1634 return i; 1635 }); 1636 numbFlux.subscribe(); 1637 } 1638 1639 @SneakyThrows 1640 @Test 1641 void 
test_subscribeOn_t1() { 1642 Flux\u0026lt;Integer\u0026gt; flux1 = Flux.range(0, 2) 1643 .map(i -\u0026gt; { 1644 //will run on incoming thread 1645 log.info(\u0026#34;Mapping for \u0026#34; + i + \u0026#34; is done by thread \u0026#34; + Thread.currentThread().getName()); 1646 return i; 1647 }); 1648 Runnable r1 = () -\u0026gt; flux1.subscribe(s -\u0026gt; { 1649 log.info(\u0026#34;Received \u0026#34; + s + \u0026#34; via \u0026#34; + Thread.currentThread().getName()); 1650 }); 1651 Thread t1 = new Thread(r1, \u0026#34;t1\u0026#34;); 1652 log.info(\u0026#34;Program thread :: \u0026#34; + Thread.currentThread().getName()); 1653 t1.start(); 1654 t1.join(); 1655 } 1656 1657 @SneakyThrows 1658 @Test 1659 void test_subscribeOn_t2() { 1660 Flux\u0026lt;Integer\u0026gt; flux2 = Flux.range(0, 2) 1661 .map(i -\u0026gt; { 1662 //will run on incoming thread 1663 log.info(\u0026#34;Upstream: Mapping for {} is done by thread {}\u0026#34;, i, Thread.currentThread().getName()); 1664 return i; 1665 }) 1666 .publishOn(Schedulers.single()) 1667 .map(i -\u0026gt; { 1668 //will run on new thread 1669 log.info(\u0026#34;Downstream: Mapping for {} is done by thread {}\u0026#34;, i, Thread.currentThread().getName()); 1670 return i; 1671 }); 1672 Runnable r2 = () -\u0026gt; flux2.subscribe(s -\u0026gt; { 1673 log.info(\u0026#34;Received {} via {}\u0026#34;, s, Thread.currentThread().getName()); 1674 }); 1675 Thread t2 = new Thread(r2, \u0026#34;t2\u0026#34;); 1676 log.info(\u0026#34;Program thread {}\u0026#34; + Thread.currentThread().getName()); 1677 t2.start(); 1678 t2.join(); 1679 } 1680 1681 @SneakyThrows 1682 @Test 1683 void test_subscribeOn_t3() { 1684 Flux\u0026lt;Integer\u0026gt; flux3 = Flux.range(0, 2) 1685 .map(i -\u0026gt; { 1686 //will run on new thread 1687 log.info(\u0026#34;Upstream: Mapping for {} is done by thread {}\u0026#34;, i, Thread.currentThread().getName()); 1688 return i; 1689 }) 1690 .subscribeOn(Schedulers.single()) 1691 .map(i -\u0026gt; { 1692 //will run on 
new thread 1693 log.info(\u0026#34;Downstream: Mapping for {} is done by thread {}\u0026#34;, i, Thread.currentThread().getName()); 1694 return i; 1695 }); 1696 Runnable r3 = () -\u0026gt; flux3.subscribe(s -\u0026gt; { 1697 log.info(\u0026#34;Received {} via {}\u0026#34;, s, Thread.currentThread().getName()); 1698 }); 1699 Thread t3 = new Thread(r3, \u0026#34;t2\u0026#34;); 1700 log.info(\u0026#34;Program thread {}\u0026#34; + Thread.currentThread().getName()); 1701 t3.start(); 1702 t3.join(); 1703 } 1704 1705 /** 1706 * ******************************************************************** 1707 * Schedulers 1708 * 1709 * parallel - for CPU intensive tasks (computation), thread pool workers = number of CPU cores 1710 * newParallel - same as above but new pool 1711 * boundedElastic - for IO intensive tasks (network calls), thread pool contains 10 * number of CPU cores 1712 * newBoundedElastic - same as above but new pool 1713 * immediate - keep the execution in the current thread 1714 * single - single reusable thread for all the callers 1715 * newSingle - same as above but new pool 1716 * elastic - unlimited threads (DON\u0026#39;T USE) 1717 * 1718 * We can have multiple publishOn methods which will keep switching the context. 1719 * The subscribeOn method can not do that. Only the very first subscribeOn method which is close to the source takes precedence. 
1720 * ******************************************************************** 1721 */ 1722 @Test 1723 void test_Schedulers() { 1724 Flux numbFlux = Flux.range(1, 5) 1725 .map(i -\u0026gt; { 1726 log.info(\u0026#34;Map1 Num: {}, Thread: {}\u0026#34;, i, Thread.currentThread().getName()); 1727 return i; 1728 }).subscribeOn(Schedulers.newSingle(\u0026#34;my-thread\u0026#34;)) 1729 .map(i -\u0026gt; { 1730 log.info(\u0026#34;Map2 Num: {}, Thread: {}\u0026#34;, i, Thread.currentThread().getName()); 1731 return i; 1732 }); 1733 numbFlux.subscribe(); 1734 } 1735 1736 /** 1737 * ******************************************************************** 1738 * publishOn - influences downstream 1739 * ******************************************************************** 1740 */ 1741 @Test 1742 void test_publishOn() { 1743 Flux numbFlux = Flux.range(1, 5) 1744 .map(i -\u0026gt; { 1745 log.info(\u0026#34;Map1 Num: {}, Thread: {}\u0026#34;, i, Thread.currentThread().getName()); 1746 return i; 1747 }).publishOn(Schedulers.single()) 1748 .map(i -\u0026gt; { 1749 log.info(\u0026#34;Map2 Num: {}, Thread: {}\u0026#34;, i, Thread.currentThread().getName()); 1750 return i; 1751 }); 1752 numbFlux.subscribe(); 1753 } 1754 1755 /** 1756 * ******************************************************************** 1757 * publishOn - influences downstream 1758 * ******************************************************************** 1759 */ 1760 @Test 1761 void test_publishOn_2() { 1762 Flux numbFlux = Flux.range(1, 5) 1763 .map(i -\u0026gt; { 1764 log.info(\u0026#34;Map1 Num: {}, Thread: {}\u0026#34;, i, Thread.currentThread().getName()); 1765 return i; 1766 }).publishOn(Schedulers.newSingle(\u0026#34;my-thread\u0026#34;)) 1767 .map(i -\u0026gt; { 1768 log.info(\u0026#34;Map2 Num: {}, Thread: {}\u0026#34;, i, Thread.currentThread().getName()); 1769 return i; 1770 }); 1771 numbFlux.subscribe(); 1772 } 1773 1774 /** 1775 * ******************************************************************** 1776 * 
fromSupplier - returns a value 1777 * fromCallable - returns a value or exception 1778 * fromRunnable - doesnt return value 1779 * ******************************************************************** 1780 */ 1781 @Test 1782 public void test_fromSupplier() { 1783 Supplier\u0026lt;String\u0026gt; stringSupplier = () -\u0026gt; getName(); 1784 Mono\u0026lt;String\u0026gt; mono = Mono.fromSupplier(stringSupplier); 1785 1786 mono.subscribe(System.out::println); 1787 } 1788 1789 /** 1790 * ******************************************************************** 1791 * fromSupplier - returns a value 1792 * fromCallable - returns a value or exception 1793 * fromRunnable - doesnt return value 1794 * ******************************************************************** 1795 */ 1796 @Test 1797 public void test_fromCallable() { 1798 Callable\u0026lt;String\u0026gt; stringCallable = () -\u0026gt; getName(); 1799 Mono\u0026lt;String\u0026gt; mono = Mono.fromCallable(stringCallable) 1800 .subscribeOn(Schedulers.boundedElastic()); 1801 mono.subscribe(System.out::println); 1802 } 1803 1804 /** 1805 * ******************************************************************** 1806 * fromSupplier - returns a value 1807 * fromCallable - returns a value or exception 1808 * fromRunnable - doesnt return value 1809 * ******************************************************************** 1810 */ 1811 @Test 1812 public void test_fromRunnable() { 1813 Runnable stringCallable = () -\u0026gt; getName(); 1814 Mono\u0026lt;Object\u0026gt; mono = Mono.fromRunnable(stringCallable) 1815 .subscribeOn(Schedulers.boundedElastic()); 1816 mono.subscribe(System.out::println); 1817 } 1818 1819 /** 1820 * ******************************************************************** 1821 * fromCallable - read file may be blocking, we don\u0026#39;t want to block main thread. 
1822 * ******************************************************************** 1823 */ 1824 @Test 1825 @SneakyThrows 1826 void test_readFile_fromCallable() { 1827 Mono\u0026lt;List\u0026lt;String\u0026gt;\u0026gt; listMono = Mono.fromCallable(() -\u0026gt; Files.readAllLines(Path.of(\u0026#34;src/test/resources/file.txt\u0026#34;))) 1828 .subscribeOn(Schedulers.boundedElastic()); 1829 listMono.subscribe(l -\u0026gt; log.info(\u0026#34;Lines: {}\u0026#34;, l)); 1830 1831 StepVerifier.create(listMono) 1832 .expectSubscription() 1833 .thenConsumeWhile(l -\u0026gt; { 1834 assertThat(l.isEmpty()).isFalse(); 1835 return true; 1836 }) 1837 .verifyComplete(); 1838 } 1839 1840 /** 1841 * ******************************************************************** 1842 * Flux.using( 1843 * resourceSupplier, 1844 * (resource) -\u0026gt; return Publisher, 1845 * (resource) -\u0026gt; clean this up 1846 * ) 1847 * 1848 * share() creates a hot publisher, else it would be a cold publisher. 1849 * Cold publisher would read the file for each subscriber – that would mean opening and reading the same file many times. 
1850 * ******************************************************************** 1851 */ 1852 @Test 1853 void test_readFile_using() { 1854 Path filePath = Paths.get(\u0026#34;src/test/resources/file.txt\u0026#34;); 1855 Flux\u0026lt;String\u0026gt; fileFlux = Flux.using( 1856 () -\u0026gt; Files.lines(filePath), 1857 Flux::fromStream, 1858 Stream::close 1859 ); 1860 fileFlux.subscribe(l -\u0026gt; log.info(\u0026#34;Lines: {}\u0026#34;, l)); 1861 1862 Flux\u0026lt;String\u0026gt; fileFlux2 = fileFlux 1863 .subscribeOn(Schedulers.newParallel(\u0026#34;file-copy\u0026#34;, 3)) 1864 .share(); 1865 fileFlux2.subscribe(l -\u0026gt; log.info(\u0026#34;Lines: {}\u0026#34;, l)); 1866 } 1867 1868 /** 1869 * ******************************************************************** 1870 * ParallelFlux - Will complete in 1 sec even when 3 ops take 3 seconds in sequence 1871 * ******************************************************************** 1872 */ 1873 @Test 1874 void test_parallel() { 1875 log.info(\u0026#34;Cores: {}\u0026#34;, Runtime.getRuntime().availableProcessors()); 1876 ParallelFlux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;banana\u0026#34;) 1877 .parallel() 1878 .runOn(Schedulers.parallel()) 1879 .map(HelperUtil::capitalizeString); 1880 StepVerifier.create(flux1) 1881 .expectNextCount(3) 1882 .verifyComplete(); 1883 1884 1885 Flux\u0026lt;String\u0026gt; flux2 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;banana\u0026#34;) 1886 .flatMap(name -\u0026gt; { 1887 return Mono.just(name) 1888 .map(HelperUtil::capitalizeString) 1889 .subscribeOn(Schedulers.parallel()); 1890 }); 1891 StepVerifier.create(flux2) 1892 .expectNextCount(3) 1893 .verifyComplete(); 1894 } 1895 1896 /** 1897 * ******************************************************************** 1898 * flatMap Parallelism - Will complete in 1 sec even when 3 ops take 3 seconds in sequence 1899 * 
******************************************************************** 1900 */ 1901 @Test 1902 void test_parallel_2() { 1903 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;banana\u0026#34;) 1904 .flatMap(name -\u0026gt; { 1905 return Mono.just(name) 1906 .map(HelperUtil::capitalizeString) 1907 .subscribeOn(Schedulers.parallel()); 1908 }); 1909 StepVerifier.create(flux1) 1910 .expectNextCount(3) 1911 .verifyComplete(); 1912 } 1913 1914 /** 1915 * ******************************************************************** 1916 * flatMap - fire-forget jobs with subscribe, Will run async jobs 1917 * ******************************************************************** 1918 */ 1919 @SneakyThrows 1920 @Test 1921 void fireForgetTest() { 1922 CountDownLatch latch = new CountDownLatch(3); 1923 Flux\u0026lt;Object\u0026gt; flux1 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;banana\u0026#34;) 1924 .flatMap(fruit -\u0026gt; { 1925 Mono.just(fruit) 1926 .map(e -\u0026gt; HelperUtil.capitalizeStringLatch(e, latch)) 1927 .subscribeOn(Schedulers.parallel()) 1928 .subscribe(); 1929 return Mono.empty(); 1930 }); 1931 StepVerifier.create(flux1) 1932 .verifyComplete(); 1933 latch.await(5, TimeUnit.SECONDS); 1934 } 1935 1936 /** 1937 * ******************************************************************** 1938 * flatMapSequential - Maintains order but executes in parallel 1939 * ******************************************************************** 1940 */ 1941 @Test 1942 void test_flatMapSequential() { 1943 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;banana\u0026#34;) 1944 .flatMapSequential(name -\u0026gt; { 1945 return Mono.just(name) 1946 .map(HelperUtil::capitalizeString) 1947 .subscribeOn(Schedulers.parallel()); 1948 }); 1949 StepVerifier.create(flux1) 1950 .expectNext(\u0026#34;APPLE\u0026#34;, \u0026#34;ORANGE\u0026#34;, 
\u0026#34;BANANA\u0026#34;) 1951 .verifyComplete(); 1952 } 1953 1954 /** 1955 * ******************************************************************** 1956 * flatMapSequential - Maintains order but executes in parallel 1957 * ******************************************************************** 1958 */ 1959 @Test 1960 void test_flatMapSequential_2() { 1961 Flux\u0026lt;String\u0026gt; flux1 = Flux.just(\u0026#34;apple\u0026#34;, \u0026#34;orange\u0026#34;, \u0026#34;banana\u0026#34;) 1962 .flatMapSequential(name -\u0026gt; { 1963 return Mono.just(name) 1964 .map(HelperUtil::capitalizeString) 1965 .subscribeOn(Schedulers.parallel()); 1966 }, 1) 1967 .log(); 1968 StepVerifier.create(flux1) 1969 .expectNext(\u0026#34;APPLE\u0026#34;, \u0026#34;ORANGE\u0026#34;, \u0026#34;BANANA\u0026#34;) 1970 .verifyComplete(); 1971 } 1972 1973 /** 1974 * ******************************************************************** 1975 * withVirtualTime - flux that emits every second. 1976 * interval - blocks thread, so you will have to use sleep to see the output 1977 * ******************************************************************** 1978 */ 1979 @Test 1980 @SneakyThrows 1981 void test_withVirtualTime() { 1982 VirtualTimeScheduler.getOrSet(); 1983 Flux\u0026lt;Long\u0026gt; intervalFlux = Flux.interval(Duration.ofSeconds(1)) 1984 .log() 1985 .take(10); 1986 intervalFlux.subscribe(i -\u0026gt; log.info(\u0026#34;Number: {}\u0026#34;, i)); 1987 TimeUnit.SECONDS.sleep(5); 1988 StepVerifier.withVirtualTime(() -\u0026gt; intervalFlux) 1989 .expectSubscription() 1990 .expectNoEvent(Duration.ofMillis(999)) 1991 .thenAwait(Duration.ofSeconds(5)) 1992 .expectNextCount(4) 1993 .thenCancel() 1994 .verify(); 1995 } 1996 1997 /** 1998 * ******************************************************************** 1999 * flux that emits every day. Use of virtual time to simulate days. 
2000 * ******************************************************************** 2001 */ 2002 @Test 2003 @SneakyThrows 2004 void test_withVirtualTime_2() { 2005 VirtualTimeScheduler.getOrSet(); 2006 StepVerifier.withVirtualTime(this::getTake) 2007 .expectSubscription() 2008 .expectNoEvent(Duration.ofDays(1)) 2009 .thenAwait(Duration.ofDays(1)) 2010 .expectNext(0L) 2011 .thenAwait(Duration.ofDays(1)) 2012 .expectNext(1L) 2013 .thenCancel() 2014 .verify(); 2015 } 2016 2017 private Flux\u0026lt;Long\u0026gt; getTake() { 2018 return Flux.interval(Duration.ofDays(1)) 2019 .log() 2020 .take(10); 2021 } 2022 2023 /** 2024 * ******************************************************************** 2025 * then - will just replay the source terminal signal, resulting in a Mono\u0026lt;Void\u0026gt; to indicate that this never signals any onNext. 2026 * thenEmpty - not only returns a Mono\u0026lt;Void\u0026gt;, but it takes a Mono\u0026lt;Void\u0026gt; as a parameter. It represents a concatenation of the source completion signal then the second, empty Mono completion signal. In other words, it completes when A then B have both completed sequentially, and doesn\u0026#39;t emit data. 2027 * thenMany - waits for the source to complete then plays all the signals from its Publisher\u0026lt;R\u0026gt; parameter, resulting in a Flux\u0026lt;R\u0026gt; that will \u0026#34;pause\u0026#34; until the source completes, then emit the many elements from the provided publisher before replaying its completion signal as well. 
2028 * ******************************************************************** 2029 */ 2030 @Test 2031 void test_thenManyChain() { 2032 Flux\u0026lt;String\u0026gt; names = Flux.just(\u0026#34;Jack\u0026#34;, \u0026#34;Jill\u0026#34;); 2033 names.map(String::toUpperCase) 2034 .thenMany(HelperUtil.deleteFromDb()) 2035 .thenMany(HelperUtil.saveToDb()) 2036 .subscribe(System.out::println); 2037 } 2038 2039 @Test 2040 void test_thenEmpty() { 2041 Flux\u0026lt;String\u0026gt; names = Flux.just(\u0026#34;Jack\u0026#34;, \u0026#34;Jill\u0026#34;); 2042 names.map(String::toUpperCase) 2043 .thenMany(HelperUtil.saveToDb()) 2044 .thenEmpty(HelperUtil.sendMail()) 2045 .subscribe(System.out::println); 2046 } 2047 2048 @Test 2049 void test_then() { 2050 Flux\u0026lt;String\u0026gt; names = Flux.just(\u0026#34;Jack\u0026#34;, \u0026#34;Jill\u0026#34;); 2051 names.map(String::toUpperCase) 2052 .thenMany(HelperUtil.saveToDb()) 2053 .then() 2054 .then(Mono.just(\u0026#34;Ram\u0026#34;)) 2055 .thenReturn(\u0026#34;Done!\u0026#34;) 2056 .subscribe(System.out::println); 2057 } 2058 2059 /** 2060 * ******************************************************************** 2061 * firstWithValue - first mono to return 2062 * ******************************************************************** 2063 */ 2064 @Test 2065 void test_monoFirst() { 2066 Mono\u0026lt;String\u0026gt; mono1 = Mono.just(\u0026#34;Jack\u0026#34;).delayElement(Duration.ofSeconds(1)); 2067 Mono\u0026lt;String\u0026gt; mono2 = Mono.just(\u0026#34;Jill\u0026#34;); 2068 //Return the mono which returns its value faster 2069 Mono\u0026lt;String\u0026gt; mono3 = Mono.firstWithValue(mono1, mono2); 2070 mono3.subscribe(System.out::println); 2071 StepVerifier.create(mono3) 2072 .expectNext(\u0026#34;Jill\u0026#34;) 2073 .verifyComplete(); 2074 } 2075 2076 /** 2077 * ******************************************************************** 2078 * buffer 2079 * ******************************************************************** 2080 */ 2081 
@Test 2082 public void test_bufferGroup() { 2083 Flux\u0026lt;List\u0026lt;Integer\u0026gt;\u0026gt; flux1 = Flux 2084 .range(1, 7) 2085 .buffer(2); 2086 StepVerifier 2087 .create(flux1) 2088 .expectNext(Arrays.asList(1, 2)) 2089 .expectNext(Arrays.asList(3, 4)) 2090 .expectNext(Arrays.asList(5, 6)) 2091 .expectNext(Arrays.asList(7)) 2092 .verifyComplete(); 2093 } 2094 2095 @Test 2096 @SneakyThrows 2097 void test_tickClock() { 2098 Flux fastClock = Flux.interval(Duration.ofSeconds(1)).map(tick -\u0026gt; \u0026#34;fast tick \u0026#34; + tick); 2099 Flux slowClock = Flux.interval(Duration.ofSeconds(2)).map(tick -\u0026gt; \u0026#34;slow tick \u0026#34; + tick); 2100 Flux.merge(fastClock, slowClock).subscribe(System.out::println); 2101 TimeUnit.SECONDS.sleep(5); 2102 } 2103 2104 @Test 2105 @SneakyThrows 2106 public void test_tickMergeClock() { 2107 Flux fastClock = Flux.interval(Duration.ofSeconds(1)).map(tick -\u0026gt; \u0026#34;fast tick \u0026#34; + tick); 2108 Flux slowClock = Flux.interval(Duration.ofSeconds(2)).map(tick -\u0026gt; \u0026#34;slow tick \u0026#34; + tick); 2109 Flux clock = Flux.merge(slowClock, fastClock); 2110 Flux feed = Flux.interval(Duration.ofSeconds(1)).map(tick -\u0026gt; LocalTime.now()); 2111 clock.withLatestFrom(feed, (tick, time) -\u0026gt; tick + \u0026#34; \u0026#34; + time).subscribe(System.out::println); 2112 TimeUnit.SECONDS.sleep(15); 2113 } 2114 2115 @Test 2116 @SneakyThrows 2117 void test_tickZipClock() { 2118 Flux fastClock = Flux.interval(Duration.ofSeconds(1)).map(tick -\u0026gt; \u0026#34;fast tick \u0026#34; + tick); 2119 Flux slowClock = Flux.interval(Duration.ofSeconds(2)).map(tick -\u0026gt; \u0026#34;slow tick \u0026#34; + tick); 2120 fastClock.zipWith(slowClock, (tick, time) -\u0026gt; tick + \u0026#34; \u0026#34; + time).subscribe(System.out::println); 2121 TimeUnit.SECONDS.sleep(5); 2122 } 2123 2124 @Test 2125 @SneakyThrows 2126 void test_emitter() { 2127 MyFeed myFeed = new MyFeed(); 2128 Flux feedFlux = 
Flux.create(emmiter -\u0026gt; { 2129 myFeed.register(new MyListener() { 2130 @Override 2131 public void priceTick(String msg) { 2132 emmiter.next(msg); 2133 } 2134 2135 @Override 2136 public void error(Throwable error) { 2137 emmiter.error(error); 2138 } 2139 }); 2140 }, FluxSink.OverflowStrategy.LATEST); 2141 feedFlux.subscribe(System.out::println); 2142 TimeUnit.SECONDS.sleep(15); 2143 System.out.println(\u0026#34;Sending message!\u0026#34;); 2144 for (int i = 0; i \u0026lt; 10; i++) { 2145 myFeed.sendMessage(\u0026#34;HELLO_\u0026#34; + i); 2146 } 2147 } 2148 2149 /** 2150 * ******************************************************************** 2151 * cancel subscription 2152 * ******************************************************************** 2153 */ 2154 @Test 2155 void test_monoCancelSubscription() { 2156 Mono\u0026lt;String\u0026gt; helloMono = Mono.just(\u0026#34;Jack\u0026#34;) 2157 .log() 2158 .map(String::toUpperCase); 2159 helloMono.subscribe(s -\u0026gt; { 2160 log.info(\u0026#34;Got: {}\u0026#34;, s); 2161 }, 2162 Throwable::printStackTrace, 2163 () -\u0026gt; log.info(\u0026#34;Finished\u0026#34;), 2164 Subscription::cancel 2165 ); 2166 } 2167 2168 /** 2169 * ******************************************************************** 2170 * cancel subscription after n elements 2171 * ******************************************************************** 2172 */ 2173 @Test 2174 void test_request() { 2175 //Jill won\u0026#39;t be fetched as subscription will be cancelled after 2 elements 2176 Flux\u0026lt;String\u0026gt; namesMono = Flux.just(\u0026#34;Jack\u0026#34;, \u0026#34;Jane\u0026#34;, \u0026#34;Jill\u0026#34;) 2177 .log(); 2178 namesMono.subscribe(s -\u0026gt; { 2179 log.info(\u0026#34;Got: {}\u0026#34;, s); 2180 }, 2181 Throwable::printStackTrace, 2182 () -\u0026gt; log.info(\u0026#34;Finished\u0026#34;), 2183 subscription -\u0026gt; subscription.request(2)); 2184 } 2185 2186 /** 2187 * 
******************************************************************** 2188 * backpressure 2189 * ******************************************************************** 2190 */ 2191 @Test 2192 void test_fluxBackPressure() { 2193 Flux\u0026lt;Integer\u0026gt; fluxNumber = Flux.range(1, 5).log(); 2194 2195 //Fetches 2 at a time. 2196 fluxNumber.subscribe(new BaseSubscriber\u0026lt;\u0026gt;() { 2197 private final int requestCount = 2; 2198 private int count = 0; 2199 2200 @Override 2201 protected void hookOnSubscribe(Subscription subscription) { 2202 request(requestCount); 2203 } 2204 2205 @Override 2206 protected void hookOnNext(Integer value) { 2207 count++; 2208 if (count \u0026gt;= requestCount) { 2209 count = 0; 2210 log.info(\u0026#34;requesting next batch!\u0026#34;); 2211 request(requestCount); 2212 } 2213 } 2214 }); 2215 } 2216 2217 /** 2218 * ******************************************************************** 2219 * onBackpressureDrop - fetches all in unbounded request, but stores in internal queue, drops elements not used 2220 * ******************************************************************** 2221 */ 2222 @Test 2223 void test_fluxBackPressureDrop() { 2224 Flux\u0026lt;Integer\u0026gt; fluxNumber = Flux.range(1, 15).log(); 2225 2226 //Fetches 2 at a time. 
2227 fluxNumber 2228 .onBackpressureDrop(item -\u0026gt; { 2229 log.info(\u0026#34;Dropped {}\u0026#34;, item); 2230 }) 2231 .subscribe(new BaseSubscriber\u0026lt;\u0026gt;() { 2232 private final int requestCount = 2; 2233 private int count = 0; 2234 private int batch = 0; 2235 2236 @Override 2237 protected void hookOnSubscribe(Subscription subscription) { 2238 request(requestCount); 2239 } 2240 2241 @Override 2242 protected void hookOnNext(Integer value) { 2243 if (batch \u0026gt; 2) { 2244 return; 2245 } 2246 count++; 2247 if (count \u0026gt;= requestCount) { 2248 count = 0; 2249 batch++; 2250 log.info(\u0026#34;requesting next batch {}\u0026#34;, batch); 2251 request(requestCount); 2252 } 2253 2254 } 2255 }); 2256 } 2257 2258 /** 2259 * ******************************************************************** 2260 * onBackpressureBuffer - fetches all in unbounded request, but stores in internal queue, but doesnt drop unused items 2261 * ******************************************************************** 2262 */ 2263 @Test 2264 void test_fluxBackPressureBuffet() { 2265 Flux\u0026lt;Integer\u0026gt; fluxNumber = Flux.range(1, 15).log(); 2266 2267 //Fetches 2 at a time. 
2268 fluxNumber 2269 .onBackpressureBuffer() 2270 .subscribe(new BaseSubscriber\u0026lt;\u0026gt;() { 2271 private final int requestCount = 2; 2272 private int count = 0; 2273 private int batch = 0; 2274 2275 @Override 2276 protected void hookOnSubscribe(Subscription subscription) { 2277 request(requestCount); 2278 } 2279 2280 @Override 2281 protected void hookOnNext(Integer value) { 2282 if (batch \u0026gt; 2) { 2283 return; 2284 } 2285 count++; 2286 if (count \u0026gt;= requestCount) { 2287 count = 0; 2288 batch++; 2289 log.info(\u0026#34;requesting next batch {}\u0026#34;, batch); 2290 request(requestCount); 2291 } 2292 2293 } 2294 }); 2295 } 2296 2297 /** 2298 * ******************************************************************** 2299 * onBackpressureError - To identify if receiver is overrun by items as producer is producing more elements than can be processed. 2300 * ******************************************************************** 2301 */ 2302 @Test 2303 void test_fluxBackPressureOnError() { 2304 Flux\u0026lt;Integer\u0026gt; fluxNumber = Flux.range(1, 15).log(); 2305 2306 //Fetches 2 at a time. 
2307 fluxNumber 2308 .onBackpressureError() 2309 .subscribe(new BaseSubscriber\u0026lt;\u0026gt;() { 2310 private final int requestCount = 2; 2311 private int count = 0; 2312 private int batch = 0; 2313 2314 @Override 2315 protected void hookOnSubscribe(Subscription subscription) { 2316 request(requestCount); 2317 } 2318 2319 @Override 2320 protected void hookOnError(Throwable throwable) { 2321 log.error(\u0026#34;Error thrown is: {}\u0026#34;, throwable.getMessage()); 2322 } 2323 2324 @Override 2325 protected void hookOnNext(Integer value) { 2326 if (batch \u0026gt; 2) { 2327 return; 2328 } 2329 count++; 2330 if (count \u0026gt;= requestCount) { 2331 count = 0; 2332 batch++; 2333 log.info(\u0026#34;requesting next batch {}\u0026#34;, batch); 2334 request(requestCount); 2335 } 2336 2337 } 2338 }); 2339 } 2340 2341 /** 2342 * ******************************************************************** 2343 * backpressure - limit rate 2344 * ******************************************************************** 2345 */ 2346 @Test 2347 void test_fluxBackPressureLimitRate() { 2348 Flux\u0026lt;Integer\u0026gt; fluxNumber = Flux.range(1, 5) 2349 .log() 2350 .limitRate(3); 2351 StepVerifier.create(fluxNumber) 2352 .expectNext(1, 2, 3, 4, 5) 2353 .verifyComplete(); 2354 } 2355 2356 /** 2357 * ******************************************************************** 2358 * cold flux - producing/emitting only when a subscriber subscribes, generates new sets of values for each new subscription, eg: spotify 2359 * hot flux - emitting happens even there is no subscriber. 
All the subscribers get the value from the single data producer irrespective of the time they started subscribing, eg: radio 2360 * ******************************************************************** 2361 */ 2362 @Test 2363 @SneakyThrows 2364 void test_connectableFlux() { 2365 ConnectableFlux\u0026lt;Integer\u0026gt; connectableFlux = Flux.range(1, 10) 2366 .delayElements(Duration.ofSeconds(1)) 2367 .publish(); 2368 connectableFlux.connect(); 2369 2370 TimeUnit.SECONDS.sleep(3); 2371 connectableFlux.subscribe(i -\u0026gt; { 2372 log.info(\u0026#34;Sub1 Number: {}\u0026#34;, i); 2373 }); 2374 2375 TimeUnit.SECONDS.sleep(2); 2376 connectableFlux.subscribe(i -\u0026gt; { 2377 log.info(\u0026#34;Sub2 Number: {}\u0026#34;, i); 2378 }); 2379 2380 ConnectableFlux\u0026lt;Integer\u0026gt; connectableFlux2 = Flux.range(1, 10) 2381 .delayElements(Duration.ofSeconds(1)) 2382 .publish(); 2383 StepVerifier.create(connectableFlux2) 2384 .then(connectableFlux2::connect) 2385 .thenConsumeWhile(i -\u0026gt; i \u0026lt;= 5) 2386 .expectNext(6, 7, 8, 9, 10) 2387 .expectComplete() 2388 .verify(); 2389 } 2390 2391 /** 2392 * ******************************************************************** 2393 * hot flux - auto connect, min subscribers required before publisher emits 2394 * ******************************************************************** 2395 */ 2396 @Test 2397 @SneakyThrows 2398 void test_connectableAutoFlux() { 2399 //Hot Flux. 
2400 Flux\u0026lt;Integer\u0026gt; connectableFlux = Flux.range(1, 5) 2401 .log() 2402 .delayElements(Duration.ofSeconds(1)) 2403 .publish() 2404 .autoConnect(2); 2405 2406 //2 subscribers 2407 StepVerifier.create(connectableFlux) 2408 .then(connectableFlux::subscribe) 2409 .expectNext(1, 2, 3, 4, 5) 2410 .expectComplete() 2411 .verify(); 2412 } 2413 2414 /** 2415 * ******************************************************************** 2416 * hot flux - ref count, if subscriber count goes down, publisher stops emitting 2417 * ******************************************************************** 2418 */ 2419 @Test 2420 @SneakyThrows 2421 void test_connectableFlux_1() { 2422 //Hot Flux. 2423 Flux\u0026lt;Integer\u0026gt; connectableFlux = Flux.range(1, 15) 2424 .delayElements(Duration.ofSeconds(1)) 2425 .doOnCancel(() -\u0026gt; { 2426 log.info(\u0026#34;Received cancel\u0026#34;); 2427 }) 2428 .publish() 2429 .refCount(2); 2430 2431 //Min 2 subscribers required 2432 Disposable subscribe1 = connectableFlux.subscribe(e -\u0026gt; log.info(\u0026#34;Sub1: \u0026#34; + e)); 2433 Disposable subscribe2 = connectableFlux.subscribe(e -\u0026gt; log.info(\u0026#34;Sub2: \u0026#34; + e)); 2434 TimeUnit.SECONDS.sleep(3); 2435 subscribe1.dispose(); 2436 subscribe2.dispose(); 2437 TimeUnit.SECONDS.sleep(5); 2438 } 2439 2440 /** 2441 * ******************************************************************** 2442 * defer 2443 * ******************************************************************** 2444 */ 2445 @Test 2446 @SneakyThrows 2447 void test_defer() { 2448 Mono\u0026lt;UUID\u0026gt; just = Mono.just(UUID.randomUUID()); 2449 Mono\u0026lt;UUID\u0026gt; deferJust = Mono.defer(() -\u0026gt; Mono.just(UUID.randomUUID())); 2450 2451 just.subscribe(l -\u0026gt; log.info(\u0026#34;UUID: {}\u0026#34;, l)); 2452 just.subscribe(l -\u0026gt; log.info(\u0026#34;UUID: {}\u0026#34;, l)); 2453 System.out.println(); 2454 deferJust.subscribe(l -\u0026gt; log.info(\u0026#34;UUID: {}\u0026#34;, l)); 
2455 deferJust.subscribe(l -\u0026gt; log.info(\u0026#34;UUID: {}\u0026#34;, l)); 2456 } 2457 2458 /** 2459 * ******************************************************************** 2460 * onSchedulersHook - if you have to use thread local 2461 * ******************************************************************** 2462 */ 2463 @Test 2464 public void test_onScheduleHook() { 2465 Runnable stringCallable = () -\u0026gt; getName(); 2466 Schedulers.onScheduleHook(\u0026#34;myHook\u0026#34;, runnable -\u0026gt; { 2467 log.info(\u0026#34;before scheduled runnable\u0026#34;); 2468 return () -\u0026gt; { 2469 log.info(\u0026#34;before execution\u0026#34;); 2470 runnable.run(); 2471 log.info(\u0026#34;after execution\u0026#34;); 2472 }; 2473 }); 2474 Mono.just(\u0026#34;Hello world\u0026#34;) 2475 .subscribeOn(Schedulers.single()) 2476 .subscribe(System.out::println); 2477 } 2478 2479 /** 2480 * ******************************************************************** 2481 * checkpoint 2482 * ******************************************************************** 2483 */ 2484 @Test 2485 void test_checkpoint() { 2486 Flux flux = Flux.just(\u0026#34;Jack\u0026#34;, \u0026#34;Jill\u0026#34;, \u0026#34;Joe\u0026#34;) 2487 .checkpoint(\u0026#34;before uppercase\u0026#34;) 2488 .map(e -\u0026gt; e.toUpperCase()) 2489 .checkpoint(\u0026#34;after uppercase\u0026#34;) 2490 .filter(e -\u0026gt; e.length() \u0026gt; 3) 2491 .checkpoint(\u0026#34;after filter\u0026#34;) 2492 .map(e -\u0026gt; new RuntimeException(\u0026#34;Custom error!\u0026#34;)); 2493 flux.subscribe(System.out::println); 2494 } 2495 2496 /** 2497 * ******************************************************************** 2498 * checkpoint 2499 * ******************************************************************** 2500 */ 2501 @Test 2502 void flux_test_debugAgent() { 2503 ReactorDebugAgent.init(); 2504 ReactorDebugAgent.processExistingClasses(); 2505 Flux flux = Flux.just(\u0026#34;a\u0026#34;) 2506 .concatWith(Flux.error(new 
IllegalArgumentException(\u0026#34;My Error!\u0026#34;))) 2507 .onErrorMap(ex -\u0026gt; { 2508 log.error(\u0026#34;Exception: {}\u0026#34;, ex.getMessage()); 2509 return new IllegalStateException(\u0026#34;New Error!\u0026#34;); 2510 }); 2511 flux.subscribe(System.out::println); 2512 } 2513 2514 /** 2515 * ******************************************************************** 2516 * Flux.generate - programmatically create flux, synchronous, cant emit without downstream subscriber asking for it. 2517 * Flux.create - programmatically create flux, asynchronous, can emit more elements without downstream subscriber asking for it. 2518 * ******************************************************************** 2519 */ 2520 @Test 2521 void test_flux_generate() { 2522 Flux\u0026lt;Integer\u0026gt; flux = Flux.generate(() -\u0026gt; 1, (state, sink) -\u0026gt; { 2523 sink.next(state * 2); 2524 if (state == 10) { 2525 sink.complete(); 2526 } 2527 return state + 1; 2528 }); 2529 flux.subscribe(System.out::println); 2530 StepVerifier.create(flux) 2531 .expectNextCount(10) 2532 .verifyComplete(); 2533 System.out.println(); 2534 } 2535 2536 /** 2537 * ******************************************************************** 2538 * Flux.generate - programmatically create flux, synchronous 2539 * Flux.create - programmatically create flux, asynchronous 2540 * 2541 * buffer - buffer if downstream cant keep up 2542 * drop - drop if downstream cant keep up 2543 * error - singal error when downstream cant keep up 2544 * ignore - ignore downstream backpressure requests 2545 * latest - downstream will only get latest 2546 * ******************************************************************** 2547 */ 2548 @Test 2549 void test_flux_create() { 2550 List\u0026lt;String\u0026gt; names = Arrays.asList(\u0026#34;jack\u0026#34;, \u0026#34;jill\u0026#34;); 2551 Flux\u0026lt;String\u0026gt; flux = Flux.create(sink -\u0026gt; { 2552 names.forEach(sink::next); 2553 sink.complete(); 2554 }); 2555 2556 
StepVerifier.create(flux) 2557 .expectNextCount(2) 2558 .verifyComplete(); 2559 2560 Flux\u0026lt;Integer\u0026gt; integerFlux = Flux.create((FluxSink\u0026lt;Integer\u0026gt; fluxSink) -\u0026gt; { 2561 IntStream.range(0, 5) 2562 .peek(i -\u0026gt; System.out.println(\u0026#34;going to emit - \u0026#34; + i)) 2563 .forEach(fluxSink::next); 2564 fluxSink.complete(); 2565 }); 2566 2567 StepVerifier.create(integerFlux) 2568 .expectNextCount(5) 2569 .verifyComplete(); 2570 2571 Flux\u0026lt;Integer\u0026gt; integerFlux2 = Flux.create((FluxSink\u0026lt;Integer\u0026gt; fluxSink) -\u0026gt; { 2572 IntStream.range(0, 5) 2573 .peek(i -\u0026gt; System.out.println(\u0026#34;going to emit - \u0026#34; + i)) 2574 .forEach(fluxSink::next); 2575 fluxSink.complete(); 2576 }, FluxSink.OverflowStrategy.DROP); 2577 2578 StepVerifier.create(integerFlux2) 2579 .expectNextCount(5) 2580 .verifyComplete(); 2581 } 2582 2583 @Test 2584 void test_chain() { 2585 CompanyVO request = new CompanyVO(); 2586 request.setName(\u0026#34;Twitter\u0026#34;); 2587 Mono.just(request) 2588 .map(HelperUtil::convertToEntity) 2589 .zipWith(HelperUtil.getNameSuffix(), HelperUtil::appendSuffix) 2590 .flatMap(HelperUtil::addCompanyOwner) 2591 .flatMap(HelperUtil::appendOrgIdToDepartment) 2592 .flatMap(HelperUtil::save) 2593 .subscribe(System.out::println); 2594 } 2595 2596 /** 2597 * ******************************************************************** 2598 * expand Finding the shortest path in a graph. Searching file system. Finding neighbor nodes in a network. 2599 * expandDeep Finding all possible combinations. 
2600 * ******************************************************************** 2601 */ 2602 @Test 2603 void test_expand() { 2604 Employee CEO = new Employee(\u0026#34;CEO\u0026#34;); 2605 2606 // Directors reporting to CEO 2607 Employee directorA = new Employee(\u0026#34;Director of Dept A\u0026#34;); 2608 Employee directorB = new Employee(\u0026#34;Director of Dept B\u0026#34;); 2609 CEO.addDirectReports(directorA, directorB); 2610 2611 // Managers reporting to directors 2612 Employee managerA1 = new Employee(\u0026#34;Manager 1 of Dept A\u0026#34;); 2613 Employee managerA2 = new Employee(\u0026#34;Manager 2 of Dept A\u0026#34;); 2614 Employee managerB1 = new Employee(\u0026#34;Manager 1 of Dept B\u0026#34;); 2615 Employee managerB2 = new Employee(\u0026#34;Manager 2 of Dept B\u0026#34;); 2616 directorA.addDirectReports(managerA1, managerA2); 2617 directorB.addDirectReports(managerB1, managerB2); 2618 2619 Mono.fromSupplier(() -\u0026gt; CEO) 2620 .expand(this::getDirectReports) 2621 .subscribe(System.out::println); 2622 } 2623 2624 @Test 2625 void test_expandDeep() { 2626 Employee CEO = new Employee(\u0026#34;CEO\u0026#34;); 2627 2628 // Directors reporting to CEO 2629 Employee directorA = new Employee(\u0026#34;Director of Dept A\u0026#34;); 2630 Employee directorB = new Employee(\u0026#34;Director of Dept B\u0026#34;); 2631 CEO.addDirectReports(directorA, directorB); 2632 2633 // Managers reporting to directors 2634 Employee managerA1 = new Employee(\u0026#34;Manager 1 of Dept A\u0026#34;); 2635 Employee managerA2 = new Employee(\u0026#34;Manager 2 of Dept A\u0026#34;); 2636 Employee managerB1 = new Employee(\u0026#34;Manager 1 of Dept B\u0026#34;); 2637 Employee managerB2 = new Employee(\u0026#34;Manager 2 of Dept B\u0026#34;); 2638 directorA.addDirectReports(managerA1, managerA2); 2639 directorB.addDirectReports(managerB1, managerB2); 2640 2641 Mono.fromSupplier(() -\u0026gt; CEO) 2642 .expandDeep(this::getDirectReports) 2643 .subscribe(System.out::println); 
2644 } 2645 2646 private Flux\u0026lt;Employee\u0026gt; getDirectReports(Employee employee) { 2647 return Flux.fromIterable(employee.getDirectReports()); 2648 } 2649 2650 @Test 2651 void test_fluxToMono() { 2652 Mono\u0026lt;List\u0026lt;String\u0026gt;\u0026gt; mono = Flux.just(\u0026#34;jack\u0026#34;, \u0026#34;raj\u0026#34;).collectList(); 2653 Flux\u0026lt;List\u0026lt;String\u0026gt;\u0026gt; flux = Flux.just(\u0026#34;jack\u0026#34;, \u0026#34;raj\u0026#34;).collectList().flatMapMany(Flux::just); 2654 2655 StepVerifier.create(mono) 2656 .expectNextCount(1) 2657 .verifyComplete(); 2658 2659 StepVerifier.create(flux) 2660 .expectNextCount(1) 2661 .verifyComplete(); 2662 2663 } 2664 2665 @Test 2666 void test_compareMapWithList() { 2667 List\u0026lt;String\u0026gt; colors = List.of(\u0026#34;red\u0026#34;, \u0026#34;blue\u0026#34;, \u0026#34;green\u0026#34;); 2668 Map\u0026lt;String, String\u0026gt; fruitMap = Map.of(\u0026#34;red\u0026#34;, \u0026#34;apple\u0026#34;, \u0026#34;green\u0026#34;, \u0026#34;grapes\u0026#34;); 2669 Mono\u0026lt;List\u0026lt;String\u0026gt;\u0026gt; flux1 = Mono.just(fruitMap) 2670 .flatMap(map -\u0026gt; { 2671 return Flux.fromIterable(colors) 2672 .flatMap(color -\u0026gt; { 2673 if (map.containsKey(color)) { 2674 return Mono.just(map.get(color)); 2675 } 2676 return Mono.empty(); 2677 }).collectList(); 2678 }); 2679 flux1.subscribe(System.out::println); 2680 2681 StepVerifier.create(flux1) 2682 .expectNext(List.of(\u0026#34;apple\u0026#34;, \u0026#34;grapes\u0026#34;)) 2683 .verifyComplete(); 2684 2685 Flux\u0026lt;String\u0026gt; flux2 = Mono.just(fruitMap) 2686 .flatMapMany(map -\u0026gt; 2687 Flux.fromIterable(colors) 2688 .flatMap(color -\u0026gt; { 2689 String fruit = fruitMap.get(color); 2690 return fruit != null ? 
Flux.just(fruit) : Flux.empty(); 2691 }) 2692 ); 2693 flux2.subscribe(System.out::println); 2694 StepVerifier.create(flux2) 2695 .expectNext(\u0026#34;apple\u0026#34;) 2696 .expectNext(\u0026#34;grapes\u0026#34;) 2697 .verifyComplete(); 2698 } 2699 2700 /** 2701 * ******************************************************************** 2702 * timeout - if response doesnt come in certain time then timeout. 2703 * ******************************************************************** 2704 */ 2705 @Test 2706 void test_timeout() { 2707 Mono\u0026lt;String\u0026gt; mono = Mono.just(\u0026#34;jack\u0026#34;) 2708 .delayElement(Duration.ofSeconds(5)) 2709 .timeout(Duration.ofSeconds(1)) 2710 .onErrorReturn(\u0026#34;raj\u0026#34;); 2711 StepVerifier.create(mono) 2712 .expectNext(\u0026#34;raj\u0026#34;) 2713 .verifyComplete(); 2714 } 2715 2716 @Test 2717 void test_pageImpl() { 2718 List\u0026lt;String\u0026gt; names = List.of(\u0026#34;Jack\u0026#34;, \u0026#34;Raj\u0026#34;, \u0026#34;Edward\u0026#34;); 2719 PageRequest pageRequest = PageRequest.of(0, 5); 2720 Mono\u0026lt;PageImpl\u0026lt;String\u0026gt;\u0026gt; pageFlux = Flux.fromIterable(names) 2721 .collectList() 2722 .zipWith(Mono.just(names.size())) 2723 .map(t -\u0026gt; new PageImpl\u0026lt;\u0026gt;(t.getT1(), pageRequest, names.size())); 2724 2725 pageFlux.subscribe(System.out::println); 2726 2727 StepVerifier.create(pageFlux) 2728 .assertNext(e -\u0026gt; { 2729 assertEquals(e.getNumberOfElements(), 3); 2730 assertEquals(\u0026#34;Jack\u0026#34;, e.getContent().get(0)); 2731 }) 2732 .verifyComplete(); 2733 } 2734 2735 @Test 2736 void test_function() { 2737 Function\u0026lt;String, Mono\u0026lt;String\u0026gt;\u0026gt; stringSupplier = p -\u0026gt; Mono.just(\u0026#34;hello \u0026#34; + p); 2738 Mono\u0026lt;String\u0026gt; mono = Mono.defer(() -\u0026gt; stringSupplier.apply(\u0026#34;jack\u0026#34;)); 2739 StepVerifier.create(mono) 2740 .expectNext(\u0026#34;hello jack\u0026#34;) 2741 .verifyComplete(); 2742 } 
2743 2744 @SneakyThrows 2745 @Test 2746 void test_blockHound() { 2747 try { 2748 FutureTask\u0026lt;?\u0026gt; task = new FutureTask\u0026lt;\u0026gt;(() -\u0026gt; { 2749 TimeUnit.SECONDS.sleep(2); 2750 return \u0026#34;\u0026#34;; 2751 }); 2752 Schedulers.parallel().schedule(task); 2753 task.get(10, TimeUnit.SECONDS); 2754 Assertions.fail(\u0026#34;should fail\u0026#34;); 2755 } catch (Exception e) { 2756 Assertions.assertTrue(e.getCause() instanceof BlockingOperationError); 2757 } 2758 } 2759 2760 @Test 2761 void test_blockHound2() { 2762 Mono\u0026lt;String\u0026gt; mono = Mono.just(\u0026#34;apple\u0026#34;) 2763 .flatMap(name -\u0026gt; { 2764 return Mono.just(\u0026#34;name\u0026#34;) 2765 .map(n -\u0026gt; { 2766 HelperUtil.sleep(1); 2767 return n; 2768 }) 2769 .subscribeOn(Schedulers.parallel()); 2770 }); 2771 2772 StepVerifier.create(mono) 2773 .expectError(BlockingOperationError.class); 2774 } 2775 2776} References https://projectreactor.io/\n","link":"https://gitorko.github.io/post/spring-reactor-basics/","section":"post","tags":["spring","spring-reactor"],"title":"Spring Reactor - Basics"},{"body":"","link":"https://gitorko.github.io/tags/spring-reactor/","section":"tags","tags":null,"title":"Spring-Reactor"},{"body":"","link":"https://gitorko.github.io/categories/spring-reactor/","section":"categories","tags":null,"title":"Spring-Reactor"},{"body":"Spring boot application integration with redis for messaging \u0026amp; data persistence.\nGithub: https://github.com/gitorko/project56\nRedis Redis can be used as an in-memory data store, database, cache, streaming engine, and message broker.\nCode 1package com.demo.project56.config; 2 3import com.demo.project56.service.MessageListener; 4import org.springframework.context.annotation.Bean; 5import org.springframework.context.annotation.Configuration; 6import org.springframework.data.redis.connection.RedisConnectionFactory; 7import org.springframework.data.redis.core.RedisTemplate; 8import 
org.springframework.data.redis.core.StringRedisTemplate; 9import org.springframework.data.redis.listener.ChannelTopic; 10import org.springframework.data.redis.listener.PatternTopic; 11import org.springframework.data.redis.listener.RedisMessageListenerContainer; 12import org.springframework.data.redis.listener.adapter.MessageListenerAdapter; 13import org.springframework.data.redis.serializer.StringRedisSerializer; 14 15@Configuration 16public class RedisConfiguration { 17 @Bean 18 public RedisTemplate\u0026lt;String, Long\u0026gt; redisTemplate(RedisConnectionFactory connectionFactory) { 19 RedisTemplate\u0026lt;String, Long\u0026gt; template = new RedisTemplate\u0026lt;\u0026gt;(); 20 template.setConnectionFactory(connectionFactory); 21 template.setDefaultSerializer(new StringRedisSerializer()); 22 return template; 23 } 24 25 @Bean 26 RedisMessageListenerContainer container(RedisConnectionFactory connectionFactory, MessageListenerAdapter listenerAdapter) { 27 RedisMessageListenerContainer container = new RedisMessageListenerContainer(); 28 container.setConnectionFactory(connectionFactory); 29 container.addMessageListener(listenerAdapter, new PatternTopic(\u0026#34;chat\u0026#34;)); 30 return container; 31 } 32 33 @Bean 34 MessageListenerAdapter listenerAdapter(MessageListener messageListener) { 35 return new MessageListenerAdapter(messageListener, \u0026#34;receiveMessage\u0026#34;); 36 } 37 38 @Bean 39 MessageListener messageConsumer() { 40 return new MessageListener(); 41 } 42 43 @Bean 44 StringRedisTemplate template(RedisConnectionFactory connectionFactory) { 45 return new StringRedisTemplate(connectionFactory); 46 } 47 48} 1package com.demo.project56.controller; 2 3import com.demo.project56.domain.Customer; 4import com.demo.project56.repository.CustomerRepository; 5import lombok.RequiredArgsConstructor; 6import lombok.extern.slf4j.Slf4j; 7import org.springframework.data.redis.core.RedisTemplate; 8import org.springframework.data.redis.core.StringRedisTemplate; 
9import org.springframework.web.bind.annotation.GetMapping; 10import org.springframework.web.bind.annotation.PathVariable; 11import org.springframework.web.bind.annotation.PostMapping; 12import org.springframework.web.bind.annotation.RequestBody; 13import org.springframework.web.bind.annotation.RestController; 14 15@RestController 16@Slf4j 17@RequiredArgsConstructor 18public class HomeController { 19 20 private final RedisTemplate\u0026lt;String, Long\u0026gt; counterTemplate; 21 private final StringRedisTemplate chatTemplate; 22 private final CustomerRepository customerRepository; 23 24 @GetMapping(\u0026#34;/api/send/{message}\u0026#34;) 25 public void sendMessage(@PathVariable String message) { 26 log.info(\u0026#34;Sending message {}\u0026#34;, message); 27 chatTemplate.convertAndSend(\u0026#34;chat\u0026#34;, message); 28 } 29 30 @GetMapping(\u0026#34;/api/inc\u0026#34;) 31 public Long incrementCounter() { 32 return counterTemplate.opsForValue().increment(\u0026#34;chat\u0026#34;); 33 } 34 35 @PostMapping(\u0026#34;/api/customer\u0026#34;) 36 public Customer saveCustomer(@RequestBody Customer customer) { 37 return customerRepository.save(customer); 38 } 39 40 @GetMapping(\u0026#34;/api/customer\u0026#34;) 41 public Iterable\u0026lt;Customer\u0026gt; getCustomers() { 42 return customerRepository.findAll(); 43 } 44 45 @GetMapping(\u0026#34;/api/send-queue/{message}\u0026#34;) 46 public void sendToQueue(@PathVariable String message) { 47 log.info(\u0026#34;Sending to queue {}\u0026#34;, message); 48 chatTemplate.opsForList().leftPush(\u0026#34;app-key\u0026#34;, message); 49 } 50 51 @GetMapping(\u0026#34;/api/get-queue\u0026#34;) 52 public String getFromQueue() { 53 return chatTemplate.opsForList().leftPop(\u0026#34;app-key\u0026#34;); 54 } 55 56} Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project 56 2 3Spring \u0026amp; Redis (Messaging + Data) 4 
5[https://gitorko.github.io/spring-redis/](https://gitorko.github.io/spring-redis/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14``` 15 16### Redis 17 18```bash 19docker run --rm --name my-redis -p 6379:6379 -d redis redis-server --requirepass \u0026#34;password\u0026#34; 20 21``` 22 23To bring up Redis and Redis Commander UI 24 25```bash 26docker-compose -f docker/docker-compose-redis.yaml up 27``` 28 29Open Redis UI [http://localhost:8081/](http://localhost:8081/) 30 31### Dev 32 33To run the backend in dev mode. 34 35```bash 36./gradlew clean build 37./gradlew bootRun 38``` References https://spring.io/projects/spring-data-redis/\n","link":"https://gitorko.github.io/post/spring-redis/","section":"post","tags":["spring","spring-boot","redis"],"title":"Spring - Redis"},{"body":"Functional programming basics on how to use java lambda and functional interfaces\nGithub: https://github.com/gitorko/project83\nFunctional Programming Methods demonstrating use of functional programming\n1package com.demo.project83; 2 3import static com.demo.project83.common.HelperUtil.getCustomers; 4import static java.util.Comparator.comparing; 5import static java.util.function.Predicate.not; 6import static java.util.stream.Collectors.collectingAndThen; 7import static java.util.stream.Collectors.filtering; 8import static java.util.stream.Collectors.groupingBy; 9import static java.util.stream.Collectors.mapping; 10import static java.util.stream.Collectors.maxBy; 11import static java.util.stream.Collectors.toList; 12import static java.util.stream.Collectors.toSet; 13import static org.junit.jupiter.api.Assertions.assertEquals; 14import static org.junit.jupiter.api.Assertions.assertThrows; 15 16import java.math.BigInteger; 17import java.util.ArrayList; 18import java.util.Arrays; 19import java.util.HashMap; 20import java.util.LinkedHashMap; 21import java.util.List; 22import java.util.Map; 23import 
java.util.Optional; 24import java.util.OptionalInt; 25import java.util.Set; 26import java.util.function.BiFunction; 27import java.util.function.BinaryOperator; 28import java.util.function.Consumer; 29import java.util.function.Function; 30import java.util.function.IntFunction; 31import java.util.function.Predicate; 32import java.util.function.Supplier; 33import java.util.function.UnaryOperator; 34import java.util.stream.Collectors; 35import java.util.stream.IntStream; 36import java.util.stream.Stream; 37 38import com.demo.project83.common.Customer; 39import com.demo.project83.common.GreetingFunction; 40import org.junit.jupiter.api.Test; 41 42 43public class FunctionalTest { 44 45 /** 46 * ******************************************************************** 47 * Difference between imperative vs functional style 48 * ******************************************************************** 49 */ 50 @Test 51 public void imperativeVsFunctional() { 52 53 // Group all person by city in pre Java 8 world 54 Map\u0026lt;String, List\u0026lt;Customer\u0026gt;\u0026gt; personByCity1 = new HashMap\u0026lt;\u0026gt;(); 55 for (Customer p : getCustomers()) { 56 if (!personByCity1.containsKey(p.getCity())) { 57 personByCity1.put(p.getCity(), new ArrayList\u0026lt;\u0026gt;()); 58 } 59 personByCity1.get(p.getCity()).add(p); 60 } 61 System.out.println(\u0026#34;Person grouped by cities : \u0026#34; + personByCity1); 62 assertEquals(1, personByCity1.get(\u0026#34;rome\u0026#34;).size()); 63 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 64 65 // Group objects in Java 8 66 Map\u0026lt;String, List\u0026lt;Customer\u0026gt;\u0026gt; personByCity2 = getCustomers().stream() 67 .collect(groupingBy(Customer::getCity)); 68 System.out.println(\u0026#34;Person grouped by cities in Java 8: \u0026#34; + personByCity2); 69 assertEquals(1, personByCity2.get(\u0026#34;rome\u0026#34;).size()); 70 
System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 71 72 // Now let\u0026#39;s group person by age 73 Map\u0026lt;Integer, List\u0026lt;Customer\u0026gt;\u0026gt; personByAge = getCustomers().stream().collect(groupingBy(Customer::getAge)); 74 System.out.println(\u0026#34;Person grouped by age in Java 8: \u0026#34; + personByAge); 75 assertEquals(2, personByAge.get(32).size()); 76 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 77 } 78 79 /** 80 * ******************************************************************** 81 * Predicate \u0026lt;T\u0026gt; - takes T returns boolean 82 * ******************************************************************** 83 */ 84 @Test 85 public void predicateTest() { 86 Predicate\u0026lt;String\u0026gt; strlen = (s) -\u0026gt; s.length() \u0026lt; 10; 87 assertEquals(strlen.test(\u0026#34;Apples\u0026#34;), true); 88 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 89 } 90 91 /** 92 * ******************************************************************** 93 * Runnable - takes nothing returns nothing 94 * ******************************************************************** 95 */ 96 @Test 97 public void runnableTest() { 98 Runnable emptyConsumer = () -\u0026gt; System.out.println(\u0026#34;run 1\u0026#34;); 99 emptyConsumer.run(); 100 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 101 } 102 103 /** 104 * ******************************************************************** 105 * Consumer \u0026lt;T\u0026gt; - takes T returns nothing 106 * ******************************************************************** 107 */ 108 @Test 109 public void consumerTest() { 110 Consumer\u0026lt;String\u0026gt; consumerStr = (s) -\u0026gt; System.out.println(s.toUpperCase()); 111 consumerStr.accept(\u0026#34;peter parker\u0026#34;); 112 
System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 113 114 Consumer\u0026lt;String\u0026gt; hello = name -\u0026gt; System.out.println(\u0026#34;Hello, \u0026#34; + name); 115 getCustomers().forEach(c -\u0026gt; hello.accept(c.getName())); 116 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 117 118 //example of a lambda made from an instance method 119 Consumer\u0026lt;String\u0026gt; print = System.out::println; 120 print.accept(\u0026#34;Sent directly from a lambda...\u0026#34;); 121 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 122 123 //As anonymous class, dont use this, provided for explanation only. 124 getCustomers().forEach(new Consumer\u0026lt;Customer\u0026gt;() { 125 @Override 126 public void accept(Customer customer) { 127 System.out.println(\u0026#34;Hello \u0026#34; + customer.getName()); 128 } 129 }); 130 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 131 132 } 133 134 /** 135 * ******************************************************************** 136 * Function \u0026lt;T,R\u0026gt; - takes T returns R 137 * ******************************************************************** 138 */ 139 @Test 140 public void functionTest() { 141 //Function example 142 Function\u0026lt;Integer, String\u0026gt; convertNumToString = (num) -\u0026gt; Integer.toString(num); 143 System.out.println(\u0026#34;String value is : \u0026#34; + convertNumToString.apply(26)); 144 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 145 146 //lambdas made using a constructor 147 Function\u0026lt;String, BigInteger\u0026gt; newBigInt = BigInteger::new; 148 System.out.println(\u0026#34;Number \u0026#34; + newBigInt.apply(\u0026#34;123456789\u0026#34;)); 149 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 150 } 151 152 /** 
153 * ******************************************************************** 154 * Supplier \u0026lt;T\u0026gt; - takes nothing returns T 155 * ******************************************************************** 156 */ 157 @Test 158 public void supplierTest() { 159 Supplier\u0026lt;String\u0026gt; s = () -\u0026gt; \u0026#34;Message from supplier\u0026#34;; 160 System.out.println(s.get()); 161 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 162 } 163 164 /** 165 * ******************************************************************** 166 * BinaryOperator \u0026lt;T\u0026gt; - takes T,T returns T 167 * ******************************************************************** 168 */ 169 @Test 170 public void binaryOperatorTest() { 171 BinaryOperator\u0026lt;Integer\u0026gt; add = (a, b) -\u0026gt; a + b; 172 System.out.println(\u0026#34;add 10 + 25: \u0026#34; + add.apply(10, 25)); 173 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 174 } 175 176 /** 177 * ******************************************************************** 178 * UnaryOperator \u0026lt;T\u0026gt; - takes T returns T 179 * ******************************************************************** 180 */ 181 @Test 182 public void unaryOperatorTest() { 183 UnaryOperator\u0026lt;String\u0026gt; str = (msg) -\u0026gt; msg.toUpperCase(); 184 System.out.println(str.apply(\u0026#34;hello, Joe\u0026#34;)); 185 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 186 187 //same example but using the static method concat 188 UnaryOperator\u0026lt;String\u0026gt; greeting = x -\u0026gt; \u0026#34;Hello, \u0026#34;.concat(x); 189 System.out.println(greeting.apply(\u0026#34;Raj\u0026#34;)); 190 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 191 192 UnaryOperator\u0026lt;String\u0026gt; makeGreeting = \u0026#34;Hello, \u0026#34;::concat; 193 
System.out.println(makeGreeting.apply(\u0026#34;Peggy\u0026#34;)); 194 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 195 } 196 197 /** 198 * ******************************************************************** 199 * BiFunction \u0026lt;T,R,S\u0026gt; - takes T,R returns S 200 * ******************************************************************** 201 */ 202 @Test 203 public void biFunctionTest() { 204 BiFunction\u0026lt;Integer, Boolean, String\u0026gt; concat = (a, b) -\u0026gt; a.toString() + b.toString(); 205 System.out.println(concat.apply(23, true)); 206 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 207 } 208 209 /** 210 * ******************************************************************** 211 * Custom Functional Interface 212 * ******************************************************************** 213 */ 214 @Test 215 public void functionalInterfaceTest() { 216 GreetingFunction greeting = message -\u0026gt; 217 System.out.println(\u0026#34;Java Programming \u0026#34; + message); 218 greeting.sayMessage(\u0026#34;is awesome\u0026#34;); 219 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 220 } 221 222 /** 223 * ******************************************************************** 224 * IntFunction\u0026lt;T\u0026gt; - takes integer returns T 225 * ******************************************************************** 226 */ 227 @Test 228 public void intFunctionTest() { 229 IntFunction\u0026lt;String\u0026gt; intToString = num -\u0026gt; Integer.toString(num); 230 System.out.println(\u0026#34;String value of number: \u0026#34; + intToString.apply(123)); 231 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 232 233 //static method reference 234 IntFunction\u0026lt;String\u0026gt; intToString2 = Integer::toString; 235 System.out.println(\u0026#34;String value of number: \u0026#34; + 
intToString2.apply(4567)); 236 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 237 } 238 239 /** 240 * ******************************************************************** 241 * Higher order function - pass functions as arguments 242 * ******************************************************************** 243 */ 244 @Test 245 public void higherOrderTest() { 246 //Function takes Integer,Predicate and returns Predicate 247 //Function\u0026lt;T,R\u0026gt; 248 Function\u0026lt;Integer, Predicate\u0026lt;String\u0026gt;\u0026gt; checkLength = (minLen) -\u0026gt; { 249 //predicate returned 250 return (str) -\u0026gt; str.length() \u0026gt; minLen; 251 }; 252 List\u0026lt;String\u0026gt; collect = getCustomers().stream() 253 .map(Customer::getName) 254 .filter(checkLength.apply(4)) 255 .collect(toList()); 256 collect.forEach(System.out::println); 257 assertEquals(2, collect.size()); 258 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 259 } 260 261 /** 262 * ******************************************************************** 263 * collect - toList, joining, toCollection 264 * ******************************************************************** 265 */ 266 @Test 267 public void collectTest() { 268 //Collect customers who are below 30. 
269 List\u0026lt;Customer\u0026gt; result = getCustomers().stream() 270 .filter(e -\u0026gt; e.getAge() \u0026lt; 30) 271 .collect(toList()); 272 assertEquals(1, result.size()); 273 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 274 275 //get all employee names in List\u0026lt;String\u0026gt; 276 //Using toCollection you can specify the type 277 ArrayList\u0026lt;String\u0026gt; result2 = getCustomers().stream() 278 .map(e -\u0026gt; e.getName()) 279 .collect(Collectors.toCollection(ArrayList::new)); 280 assertEquals(5, result2.size()); 281 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 282 283 //Collect and join to single string separated by coma. 284 String customerString = getCustomers().stream() 285 .filter(e -\u0026gt; e.getAge() \u0026gt; 30) 286 .map(e -\u0026gt; e.getName()) 287 .collect(Collectors.joining(\u0026#34;, \u0026#34;)); 288 System.out.println(customerString); 289 assertEquals(\u0026#34;jack, raj, peter, marie\u0026#34;, customerString); 290 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 291 292 } 293 294 /** 295 * ******************************************************************** 296 * collect - toMap 297 * ******************************************************************** 298 */ 299 @Test 300 void collectToMapTest() { 301 302 //Collect a map with name as key and age as value. 
303 getCustomers().stream() 304 .filter(e -\u0026gt; e.getAge() \u0026gt; 30) 305 .collect(Collectors.toMap(Customer::getName, Customer::getAge)) 306 .forEach((k, v) -\u0026gt; System.out.println(k + \u0026#34;:\u0026#34; + v)); 307 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 308 309 //Collect a map by name + city as key customer as value 310 getCustomers().stream() 311 .collect(Collectors.toMap(c -\u0026gt; c.getName() + \u0026#34;-\u0026#34; + c.getCity(), c -\u0026gt; c)) 312 .forEach((k, v) -\u0026gt; System.out.println(k + \u0026#34;:\u0026#34; + v)); 313 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 314 } 315 316 /** 317 * ******************************************************************** 318 * collect - sort a Map by key or value 319 * ******************************************************************** 320 */ 321 @Test 322 public void sortMapTest() { 323 Map\u0026lt;String, Integer\u0026gt; map = new HashMap\u0026lt;\u0026gt;(); 324 map.put(\u0026#34;Niraj\u0026#34;, 6); 325 map.put(\u0026#34;Rahul\u0026#34;, 43); 326 map.put(\u0026#34;Ram\u0026#34;, 44); 327 map.put(\u0026#34;Sham\u0026#34;, 33); 328 map.put(\u0026#34;Pratik\u0026#34;, 5); 329 map.put(\u0026#34;Ashok\u0026#34;, 5); 330 331 //Sort map by Value Ascending order 332 Map\u0026lt;String, Integer\u0026gt; sortedMapByValueAscending = map.entrySet() 333 .stream() 334 .sorted(Map.Entry.comparingByValue()) 335 .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (e1, e2) -\u0026gt; e1, LinkedHashMap::new)); 336 System.out.println(sortedMapByValueAscending); 337 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 338 339 //Sort map by Value Descending order 340 Map\u0026lt;String, Integer\u0026gt; sortedMapByValueDescending = map.entrySet() 341 .stream() 342 .sorted(Map.Entry.\u0026lt;String, Integer\u0026gt;comparingByValue().reversed()) 343 
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (e1, e2) -\u0026gt; e1, LinkedHashMap::new)); 344 System.out.println(sortedMapByValueDescending); 345 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 346 347 //Sort map by Key Ascending order 348 Map\u0026lt;String, Integer\u0026gt; sortedMapByKeyAscending 349 = map.entrySet() 350 .stream().sorted(Map.Entry.comparingByKey()) 351 .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (e1, e2) -\u0026gt; e1, LinkedHashMap::new)); 352 System.out.println(sortedMapByKeyAscending); 353 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 354 355 //Sort map by Key Descending order 356 Map\u0026lt;String, Integer\u0026gt; sortedMapByKeyDescending 357 = map.entrySet() 358 .stream().sorted(Map.Entry.\u0026lt;String, Integer\u0026gt;comparingByKey().reversed()) 359 .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (e1, e2) -\u0026gt; e1, LinkedHashMap::new)); 360 System.out.println(sortedMapByKeyDescending); 361 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 362 } 363 364 /** 365 * ******************************************************************** 366 * collect - summingInt, sum 367 * ******************************************************************** 368 */ 369 @Test 370 public void collectSumTest() { 371 //Sum all ages. 
372 int total = getCustomers().stream() 373 .collect(Collectors.summingInt(Customer::getAge)); 374 assertEquals(total, 163); 375 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 376 377 int total2 = getCustomers().stream() 378 .mapToInt(Customer::getAge) 379 .sum(); 380 assertEquals(total2, 163); 381 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 382 } 383 384 /** 385 * ******************************************************************** 386 * sorted 387 * ******************************************************************** 388 */ 389 @Test 390 public void sortedTest() { 391 392 List\u0026lt;String\u0026gt; sortResult = getCustomers().stream() 393 .map(c -\u0026gt; c.getName()) 394 .sorted((a, b) -\u0026gt; b.compareTo(a)) 395 .collect(toList()); 396 sortResult.forEach(System.out::println); 397 398 //Avoid using the below as it modifies the orignial list. 399 //Collections.sort(getCustomers(), (a, b) -\u0026gt; b.getName().compareTo(a.getName())); 400 401 List\u0026lt;String\u0026gt; expectedResult = List.of(\u0026#34;raj\u0026#34;, \u0026#34;peter\u0026#34;, \u0026#34;marie\u0026#34;, \u0026#34;joe\u0026#34;, \u0026#34;jack\u0026#34;); 402 assertEquals(expectedResult, sortResult); 403 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 404 405 } 406 407 /** 408 * ******************************************************************** 409 * filter 410 * ******************************************************************** 411 */ 412 @Test 413 public void filterTest() { 414 getCustomers().stream() 415 .filter(customer -\u0026gt; { 416 return customer.getName().startsWith(\u0026#34;P\u0026#34;); //predicate 417 }) 418 .forEach(System.out::println); 419 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 420 } 421 422 /** 423 * ******************************************************************** 424 * 
findFirst, ifPresent 425 * ******************************************************************** 426 */ 427 @Test 428 public void findFirstTest() { 429 getCustomers() 430 .stream() 431 .filter(customer -\u0026gt; customer.getName().startsWith(\u0026#34;P\u0026#34;)) 432 .findFirst() 433 .ifPresent(System.out::println); 434 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 435 } 436 437 /** 438 * ******************************************************************** 439 * mapToInt, max, average, IntStream 440 * ******************************************************************** 441 */ 442 @Test 443 public void mapToIntTest() { 444 int sum = getCustomers().stream() 445 .mapToInt(Customer::getAge) 446 .sum(); 447 System.out.println(sum); 448 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 449 450 //primitive streams 451 IntStream.range(1, 4) 452 .forEach(System.out::println); 453 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 454 455 //find the average of the numbers squared 456 Arrays.stream(new int[]{1, 2, 3, 4}) 457 .map(n -\u0026gt; n * n) 458 .average() 459 .ifPresent(System.out::println); 460 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 461 462 //map doubles to ints 463 Stream.of(1.5, 2.3, 3.7) 464 .mapToInt(Double::intValue) 465 .forEach(System.out::println); 466 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 467 468 //max of age 469 OptionalInt max = getCustomers().stream() 470 .mapToInt(Customer::getAge) 471 .max(); 472 System.out.println(max.getAsInt()); 473 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 474 475 } 476 477 /** 478 * ******************************************************************** 479 * thenComparing - double sort, sort on name, then sort on age 480 * 
******************************************************************** 481 */ 482 @Test 483 public void doubleSortTest() { 484 //Sort customer by name and then by age. 485 getCustomers().stream() 486 .sorted( 487 comparing(Customer::getName) 488 .thenComparing(Customer::getAge) 489 ) 490 .forEach(System.out::println); 491 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 492 } 493 494 /** 495 * ******************************************************************** 496 * flatMap 497 * ******************************************************************** 498 */ 499 @Test 500 public void flatMapTest() { 501 //Get chars of all customer names. 502 Set\u0026lt;String\u0026gt; collect = getCustomers().stream() 503 .map(Customer::getName) 504 .flatMap(name -\u0026gt; Stream.of(name.split(\u0026#34;\u0026#34;))) 505 .collect(toSet()); 506 System.out.println(collect); 507 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 508 509 //one to many 510 List\u0026lt;Integer\u0026gt; nums = List.of(1, 2, 3); 511 List\u0026lt;Integer\u0026gt; collect2 = nums.stream() 512 .flatMap(e -\u0026gt; List.of(e, e + 1).stream()) 513 .collect(toList()); 514 System.out.println(collect2); 515 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 516 } 517 518 /** 519 * ******************************************************************** 520 * collect - groupBy, mapping, filtering, counting 521 * ******************************************************************** 522 */ 523 @Test 524 public void groupByTest() { 525 526 //group by name and get list of customers with same name. 
527 Map\u0026lt;String, List\u0026lt;Customer\u0026gt;\u0026gt; result1 = getCustomers().stream() 528 .collect(groupingBy(Customer::getName)); 529 System.out.println(result1); 530 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 531 532 //group by name and get list of ages if customer with same name. 533 Map\u0026lt;String, List\u0026lt;Integer\u0026gt;\u0026gt; result2 = getCustomers().stream() 534 .collect( 535 groupingBy(Customer::getName, 536 mapping(Customer::getAge, toList()))); 537 System.out.println(result2); 538 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 539 540 //Group by age, employees who name is greater than 4 chars. 541 Map\u0026lt;Integer, List\u0026lt;String\u0026gt;\u0026gt; result3 = getCustomers().stream() 542 .collect( 543 groupingBy(Customer::getAge, 544 mapping( 545 Customer::getName, 546 filtering(name -\u0026gt; name.length() \u0026gt; 4, toList()) 547 )) 548 ); 549 System.out.println(result3); 550 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 551 552 //group by age all customers name 553 Map\u0026lt;Integer, List\u0026lt;String\u0026gt;\u0026gt; result4 = getCustomers().stream() 554 .collect( 555 groupingBy(Customer::getAge, 556 mapping(Customer::getName, toList())) 557 ); 558 System.out.println(result4); 559 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 560 561 //count emp with same name. 
562 Map\u0026lt;String, Long\u0026gt; result5 = getCustomers().stream() 563 .collect(groupingBy(Customer::getName, Collectors.counting())); 564 System.out.println(result5); 565 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 566 567 } 568 569 /** 570 * ******************************************************************** 571 * maxBy - comparing, collectingAndThen 572 * ******************************************************************** 573 */ 574 @Test 575 public void maxByTest() { 576 //emp with max age 577 Optional\u0026lt;Customer\u0026gt; maxEmp = getCustomers().stream() 578 .collect(maxBy(comparing(Customer::getAge))); 579 System.out.println(maxEmp.get()); 580 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 581 582 //emp with max age and print name instead of emp. 583 String result = getCustomers().stream() 584 .collect(collectingAndThen( 585 maxBy(comparing(Customer::getAge)), 586 e -\u0026gt; e.map(Customer::getName).orElse(\u0026#34;\u0026#34;) 587 ) 588 ); 589 System.out.println(result); 590 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 591 592 } 593 594 /** 595 * ******************************************************************** 596 * collectingAndThen 597 * ******************************************************************** 598 */ 599 @Test 600 public void collectingAndThenTest() { 601 //convert long to int. 
602 Map\u0026lt;String, Integer\u0026gt; result = getCustomers().stream() 603 .collect(groupingBy(Customer::getName, 604 collectingAndThen(Collectors.counting(), 605 Long::intValue 606 ))); 607 System.out.println(result); 608 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 609 } 610 611 /** 612 * ******************************************************************** 613 * partitioningBy - same as groupBy but always partitions into 2 parts 614 * ******************************************************************** 615 */ 616 @Test 617 public void partitioningByTest() { 618 //2 list of even odd employees 619 Map\u0026lt;Boolean, List\u0026lt;Customer\u0026gt;\u0026gt; result = getCustomers().stream() 620 .collect(Collectors.partitioningBy(p -\u0026gt; p.getAge() % 2 == 0)); 621 System.out.println(result); 622 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 623 } 624 625 /** 626 * ******************************************************************** 627 * reduce 628 * ******************************************************************** 629 */ 630 @Test 631 public void reduceTest() { 632 List\u0026lt;Integer\u0026gt; numLst = Arrays.asList(1, 2, 3, 4, 5, 6); 633 634 //Sum of integer array. (both are param) 635 Integer reduce = numLst.stream().reduce(0, (total, val) -\u0026gt; Integer.sum(total, val)); 636 System.out.println(\u0026#34;reduce = \u0026#34; + reduce); 637 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 638 639 reduce = numLst.stream().reduce(0, Integer::sum); 640 System.out.println(\u0026#34;reduce = \u0026#34; + reduce); 641 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 642 643 //Concat of string. 
(one is target, one is param) 644 String concat = numLst.stream().map(String::valueOf).reduce(\u0026#34;\u0026#34;, (carry, str) -\u0026gt; carry.concat(str)); 645 System.out.println(\u0026#34;concat = \u0026#34; + concat); 646 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 647 648 concat = numLst.stream().map(String::valueOf).reduce(\u0026#34;\u0026#34;, String::concat); 649 System.out.println(\u0026#34;concat = \u0026#34; + concat); 650 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 651 652 Integer sum = numLst.stream().filter(e -\u0026gt; e % 2 == 0).map(e -\u0026gt; e * 2).reduce(0, Integer::sum); 653 System.out.println(\u0026#34;sum = \u0026#34; + sum); 654 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 655 656 Integer sum2 = numLst.stream().filter(e -\u0026gt; e % 2 == 0).mapToInt(e -\u0026gt; e * 2).sum(); 657 System.out.println(\u0026#34;sum2 = \u0026#34; + sum2); 658 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 659 660 //Use reduce to collect to a list. Given only to explain, use toList in real world. 
661 getCustomers().stream() 662 .filter(e -\u0026gt; e.getAge() \u0026gt; 30) 663 .map(e -\u0026gt; e.getName()) 664 .map(String::toUpperCase) 665 .reduce(new ArrayList\u0026lt;String\u0026gt;(), (names, name) -\u0026gt; { 666 names.add(name); 667 return names; 668 }, 669 (names1, names2) -\u0026gt; { 670 names1.addAll(names2); 671 return names1; 672 } 673 ).forEach(System.out::println); 674 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 675 } 676 677 /** 678 * ******************************************************************** 679 * ifPresent - findAny 680 * ******************************************************************** 681 */ 682 @Test 683 public void ifPresentTest() { 684 String input = \u0026#34;key:a,key:b,key:c,key:d\u0026#34;; 685 Optional.ofNullable(input) 686 .ifPresent(in -\u0026gt; Arrays.stream(in.split(\u0026#34;,\u0026#34;)) 687 .map(String::toLowerCase) 688 .peek(System.out::println) 689 .filter(not(match -\u0026gt; (match.startsWith(\u0026#34;key\u0026#34;)))) 690 .findAny() 691 .ifPresent(match -\u0026gt; new RuntimeException(\u0026#34;Pattern not valid!\u0026#34;))); 692 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 693 694 String input2 = \u0026#34;key:a,key:b,:c,key:d\u0026#34;; 695 assertThrows(RuntimeException.class, () -\u0026gt; { 696 Optional.ofNullable(input2) 697 .ifPresent(in -\u0026gt; Arrays.stream(in.split(\u0026#34;,\u0026#34;)) 698 .map(String::toLowerCase) 699 .peek(System.out::println) 700 .filter(not(match -\u0026gt; (match.startsWith(\u0026#34;key\u0026#34;)))) 701 .findAny() 702 .ifPresent(match -\u0026gt; { 703 System.out.println(\u0026#34;Here!\u0026#34;); 704 throw new RuntimeException(\u0026#34;Pattern not valid!\u0026#34;); 705 })); 706 }); 707 System.out.println(\u0026#34;---------------------------------------------------\u0026#34;); 708 } 709 710} References Java 
Lambda\n","link":"https://gitorko.github.io/post/functional-programming-basics/","section":"post","tags":["lambda","java8"],"title":"Functional Programming - Basics"},{"body":"","link":"https://gitorko.github.io/categories/functional-programming/","section":"categories","tags":null,"title":"Functional-Programming"},{"body":"","link":"https://gitorko.github.io/tags/java8/","section":"tags","tags":null,"title":"Java8"},{"body":"","link":"https://gitorko.github.io/tags/lambda/","section":"tags","tags":null,"title":"Lambda"},{"body":"","link":"https://gitorko.github.io/categories/angular/","section":"categories","tags":null,"title":"Angular"},{"body":"","link":"https://gitorko.github.io/tags/chart.js/","section":"tags","tags":null,"title":"Chart.js"},{"body":"Spring boot web application with angular and JWT authentication support, uses clarity for UI components and chart.js for rendering charts. Creates uber jar to deploy.\nGithub: https://github.com/gitorko/project88\nQuick Overview To deploy the application in a single command, clone the project, make sure no conflicting docker containers or ports are running and then run\n1git clone https://github.com/gitorko/project88 2cd project88 3docker-compose -f docker/docker-compose.yml up Open http://localhost:8080/\nFeatures A Spring Boot application with angular 11. Supports basic integration with spring security \u0026amp; JWT and provides login \u0026amp; logout support. 
Spring dev tools allow seamless reload on any changes for java files.\nAngular 11 app supports basic login via JWT Clarity JWT token based Login CRUD UI for adding and removing customer Postgres db Spring JPA Chart.js charts for bar,pie,stack charts with data from rest api Implementation Design Wrong credentials\nUser role can't delete the record, only admin role can delete the record\nCode On Intellij to allow spring dev tools to reload on change you need to enable 'Update classes and resources' as shown below\nRest API returns data that is rendered in angular frontend.\n1package com.demo.project88.controller; 2 3import java.util.Date; 4 5import com.demo.project88.domain.Customer; 6import com.demo.project88.repo.CustomerRepository; 7import lombok.RequiredArgsConstructor; 8import lombok.extern.slf4j.Slf4j; 9import org.springframework.security.access.prepost.PreAuthorize; 10import org.springframework.web.bind.annotation.DeleteMapping; 11import org.springframework.web.bind.annotation.GetMapping; 12import org.springframework.web.bind.annotation.PathVariable; 13import org.springframework.web.bind.annotation.PostMapping; 14import org.springframework.web.bind.annotation.RequestBody; 15import org.springframework.web.bind.annotation.RestController; 16 17@RestController 18@Slf4j 19@RequiredArgsConstructor 20public class HomeController { 21 22 final CustomerRepository customerRepo; 23 24 @GetMapping(value = \u0026#34;/api/time\u0026#34;) 25 public Date serverTime() { 26 log.info(\u0026#34;Getting server time!\u0026#34;); 27 return new Date(); 28 } 29 30 @GetMapping(value = \u0026#34;/api/customer\u0026#34;) 31 @PreAuthorize(\u0026#34;hasRole(\u0026#39;ROLE_USER\u0026#39;) or hasRole(\u0026#39;ROLE_ADMIN\u0026#39;)\u0026#34;) 32 public Iterable\u0026lt;Customer\u0026gt; getCustomers() { 33 return customerRepo.findAll(); 34 } 35 36 @PreAuthorize(\u0026#34;hasRole(\u0026#39;ROLE_ADMIN\u0026#39;)\u0026#34;) 37 @PostMapping(value = \u0026#34;/api/customer\u0026#34;) 38 public 
Customer saveCustomer(@RequestBody Customer customer) { 39 log.info(\u0026#34;Saving customer!\u0026#34;); 40 return customerRepo.save(customer); 41 } 42 43 @PreAuthorize(\u0026#34;hasRole(\u0026#39;ROLE_ADMIN\u0026#39;)\u0026#34;) 44 @DeleteMapping(value = \u0026#34;/api/customer/{id}\u0026#34;) 45 public void deleteCustomer(@PathVariable Long id) { 46 log.info(\u0026#34;Deleting customer: {}\u0026#34;, id); 47 customerRepo.deleteById(id); 48 } 49 50} JWT authentication configured.\n1package com.demo.project88.security; 2 3import com.demo.project88.service.UserDetailsServiceImpl; 4import lombok.RequiredArgsConstructor; 5import org.springframework.context.annotation.Bean; 6import org.springframework.context.annotation.Configuration; 7import org.springframework.security.authentication.AuthenticationManager; 8import org.springframework.security.authentication.AuthenticationProvider; 9import org.springframework.security.authentication.dao.DaoAuthenticationProvider; 10import org.springframework.security.config.annotation.authentication.configuration.AuthenticationConfiguration; 11import org.springframework.security.config.annotation.method.configuration.EnableGlobalMethodSecurity; 12import org.springframework.security.config.annotation.web.builders.HttpSecurity; 13import org.springframework.security.config.annotation.web.configuration.EnableWebSecurity; 14import org.springframework.security.config.http.SessionCreationPolicy; 15import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder; 16import org.springframework.security.crypto.password.PasswordEncoder; 17import org.springframework.security.web.SecurityFilterChain; 18import org.springframework.security.web.authentication.UsernamePasswordAuthenticationFilter; 19 20@Configuration 21@EnableGlobalMethodSecurity(prePostEnabled = true) 22@RequiredArgsConstructor 23@EnableWebSecurity 24public class SecurityConfig { 25 26 public static final String USER_ROLE = \u0026#34;ADMIN\u0026#34;; 27 public static final 
String USER_NAME = \u0026#34;admin\u0026#34;; 28 public static final String USER_PASSWORD = \u0026#34;admin@123\u0026#34;; 29 final UserDetailsServiceImpl userDetailsService; 30 final JwtAuthEntryPoint authenticationEntryPoint; 31 32 @Bean 33 public PasswordEncoder passwordEncoder() { 34 return new BCryptPasswordEncoder(); 35 } 36 37 @Bean 38 public AuthenticationManager authenticationManager(AuthenticationConfiguration authenticationConfiguration) throws Exception { 39 return authenticationConfiguration.getAuthenticationManager(); 40 } 41 42 @Bean 43 public JwtTokenFilter jwtTokenFilter() { 44 return new JwtTokenFilter(); 45 } 46 47 @Bean 48 SecurityFilterChain securityFilterChain(HttpSecurity http) throws Exception { 49 http 50 .csrf(csrf -\u0026gt; csrf.disable()) 51 .exceptionHandling(e -\u0026gt; e.authenticationEntryPoint(authenticationEntryPoint)) 52 .sessionManagement(s -\u0026gt; s.sessionCreationPolicy(SessionCreationPolicy.STATELESS)) 53 .authorizeHttpRequests(authorize -\u0026gt; authorize 54 .requestMatchers(\u0026#34;/api/auth/**\u0026#34;).permitAll() 55 .requestMatchers(\u0026#34;/api/time\u0026#34;).permitAll() 56 .requestMatchers(\u0026#34;/api/**\u0026#34;).authenticated() 57 .anyRequest().permitAll() 58 ); 59 http.addFilterBefore(jwtTokenFilter(), UsernamePasswordAuthenticationFilter.class); 60 return http.build(); 61 } 62 63 @Bean 64 public AuthenticationProvider authenticationProvider() { 65 DaoAuthenticationProvider authProvider = new DaoAuthenticationProvider(); 66 authProvider.setUserDetailsService(userDetailsService); 67 authProvider.setPasswordEncoder(passwordEncoder()); 68 return authProvider; 69 } 70 71} chart.js is a library that provides various charts, the project renders charts and the data is fetched from Rest API.\n1import { Component, OnInit } from \u0026#39;@angular/core\u0026#39;; 2import { RestService } from \u0026#39;../../services/rest.service\u0026#39;; 3import { ChartDataSets } from \u0026#39;chart.js\u0026#39;; 4import { 
Label, monkeyPatchChartJsLegend, monkeyPatchChartJsTooltip } from \u0026#39;ng2-charts\u0026#39;; 5import { Router } from \u0026#39;@angular/router\u0026#39;; 6 7@Component({ 8 selector: \u0026#39;app-chart\u0026#39;, 9 templateUrl: \u0026#39;./chart.component.html\u0026#39; 10}) 11export class ChartComponent implements OnInit { 12 13 pieData: ChartDataSets[] = []; 14 pieLabel: Label[] = []; 15 pieOptions: any; 16 17 barData: ChartDataSets[] = []; 18 barLabel: Label[] = []; 19 barOptions: any; 20 21 lineData: ChartDataSets[] = []; 22 lineLabel: Label[] = []; 23 lineOptions: any; 24 25 columnData: ChartDataSets[] = []; 26 columnLabel: Label[] = []; 27 columnOptions: any; 28 29 constructor(private restService: RestService, private router: Router) { 30 monkeyPatchChartJsTooltip(); 31 monkeyPatchChartJsLegend(); 32 } 33 34 ngOnInit(): void { 35 const backgroundColors = [ 36 \u0026#39;rgba(255, 99, 132, 0.2)\u0026#39;, 37 \u0026#39;rgba(255, 159, 64, 0.2)\u0026#39;, 38 \u0026#39;rgba(255, 205, 86, 0.2)\u0026#39;, 39 \u0026#39;rgba(75, 192, 192, 0.2)\u0026#39;, 40 \u0026#39;rgba(54, 162, 235, 0.2)\u0026#39;, 41 \u0026#39;rgba(153, 102, 255, 0.2)\u0026#39;, 42 \u0026#39;rgba(201, 203, 207, 0.2)\u0026#39; 43 ]; 44 const borderColors = [ 45 \u0026#39;rgb(255, 99, 132)\u0026#39;, 46 \u0026#39;rgb(255, 159, 64)\u0026#39;, 47 \u0026#39;rgb(255, 205, 86)\u0026#39;, 48 \u0026#39;rgb(75, 192, 192)\u0026#39;, 49 \u0026#39;rgb(54, 162, 235)\u0026#39;, 50 \u0026#39;rgb(153, 102, 255)\u0026#39;, 51 \u0026#39;rgb(201, 203, 207)\u0026#39; 52 ]; 53 54 this.pieOptions = { 55 title: { 56 display: true, 57 text: \u0026#39;Pie Chart\u0026#39; 58 }, 59 responsive: true, 60 maintainAspectRatio: false, 61 scales: { 62 yAxes: [{ 63 ticks: { 64 beginAtZero: true 65 } 66 }] 67 } 68 }; 69 70 this.barOptions = { 71 title: { 72 display: true, 73 text: \u0026#39;Bar Chart\u0026#39; 74 }, 75 responsive: true, 76 maintainAspectRatio: false, 77 scales: { 78 yAxes: [{ 79 ticks: { 80 beginAtZero: true 81 
} 82 }] 83 } 84 }; 85 86 this.lineOptions = { 87 title: { 88 display: true, 89 text: \u0026#39;Line Chart\u0026#39; 90 }, 91 responsive: true, 92 maintainAspectRatio: false, 93 scales: { 94 yAxes: [{ 95 ticks: { 96 beginAtZero: true 97 } 98 }] 99 } 100 }; 101 102 this.columnOptions = { 103 title: { 104 display: true, 105 text: \u0026#39;Column Chart\u0026#39; 106 }, 107 responsive: true, 108 maintainAspectRatio: false, 109 scales: { 110 x: { 111 stacked: true, 112 }, 113 y: { 114 stacked: true 115 } 116 } 117 }; 118 119 this.restService.getPieData().subscribe(data =\u0026gt; { 120 this.pieData = [ 121 { data: data[1], label: \u0026#39;Pie Chart\u0026#39;, backgroundColor: backgroundColors, borderColor: borderColors, borderWidth: 1 }, 122 ]; 123 this.pieLabel = data[0]; 124 }); 125 126 this.restService.getPieData().subscribe(data =\u0026gt; { 127 this.barData = [ 128 { data: data[1], label: \u0026#39;Bar Chart\u0026#39;, backgroundColor: backgroundColors, borderColor: borderColors, borderWidth: 1 }, 129 ]; 130 this.barLabel = data[0]; 131 }); 132 133 this.restService.getPieData().subscribe(data =\u0026gt; { 134 this.lineData = [ 135 { data: data[1], label: \u0026#39;Line Chart\u0026#39;, backgroundColor: backgroundColors, borderColor: borderColors, borderWidth: 1 }, 136 ]; 137 this.lineLabel = data[0]; 138 }); 139 140 this.restService.getColumnData().subscribe(data =\u0026gt; { 141 this.columnData = [ 142 { 143 data: data[1].data, 144 label: data[1].name, 145 backgroundColor: backgroundColors[0], 146 borderColor: borderColors[0], 147 borderWidth: 1, 148 fill: false 149 }, 150 { 151 data: data[2].data, 152 label: data[2].name, 153 backgroundColor: backgroundColors[1], 154 borderColor: borderColors[1], 155 borderWidth: 1, 156 fill: false 157 }, 158 { 159 data: data[3].data, 160 label: data[3].name, 161 backgroundColor: backgroundColors[2], 162 borderColor: borderColors[2], 163 borderWidth: 1, 164 fill: false 165 } 166 ]; 167 this.columnLabel = data[0].data; 168 }); 
169 } 170 171} 1\u0026lt;div class=\u0026#34;content-container\u0026#34;\u0026gt; 2 \u0026lt;div class=\u0026#34;content-area\u0026#34;\u0026gt; 3 \u0026lt;div class=\u0026#34;clr-row\u0026#34;\u0026gt; 4 \u0026lt;div class=\u0026#34;clr-col-6\u0026#34;\u0026gt; 5 \u0026lt;canvas baseChart [datasets]=\u0026#34;pieData\u0026#34; [labels]=\u0026#34;pieLabel\u0026#34; [options]=\u0026#34;pieOptions\u0026#34; [chartType]=\u0026#34;\u0026#39;pie\u0026#39;\u0026#34; 6 width=\u0026#34;200\u0026#34; height=\u0026#34;300\u0026#34;\u0026gt; 7 \u0026lt;/canvas\u0026gt; 8 \u0026lt;/div\u0026gt; 9 \u0026lt;div class=\u0026#34;clr-col-6\u0026#34;\u0026gt; 10 \u0026lt;canvas baseChart [datasets]=\u0026#34;barData\u0026#34; [labels]=\u0026#34;barLabel\u0026#34; [options]=\u0026#34;barOptions\u0026#34; [chartType]=\u0026#34;\u0026#39;bar\u0026#39;\u0026#34; 11 width=\u0026#34;200\u0026#34; height=\u0026#34;300\u0026#34;\u0026gt; 12 \u0026lt;/canvas\u0026gt; 13 \u0026lt;/div\u0026gt; 14 \u0026lt;/div\u0026gt; 15 \u0026lt;div class=\u0026#34;clr-row\u0026#34;\u0026gt; 16 \u0026lt;div class=\u0026#34;clr-col-6\u0026#34;\u0026gt; 17 \u0026lt;canvas baseChart [datasets]=\u0026#34;lineData\u0026#34; [labels]=\u0026#34;lineLabel\u0026#34; [options]=\u0026#34;lineOptions\u0026#34; [chartType]=\u0026#34;\u0026#39;line\u0026#39;\u0026#34; 18 width=\u0026#34;200\u0026#34; height=\u0026#34;300\u0026#34;\u0026gt; 19 \u0026lt;/canvas\u0026gt; 20 \u0026lt;/div\u0026gt; 21 \u0026lt;div class=\u0026#34;clr-col-6\u0026#34;\u0026gt; 22 \u0026lt;canvas baseChart [datasets]=\u0026#34;columnData\u0026#34; [labels]=\u0026#34;columnLabel\u0026#34; [options]=\u0026#34;columnOptions\u0026#34; [chartType]=\u0026#34;\u0026#39;bar\u0026#39;\u0026#34; 23 width=\u0026#34;200\u0026#34; height=\u0026#34;300\u0026#34;\u0026gt; 24 \u0026lt;/canvas\u0026gt; 25 \u0026lt;/div\u0026gt; 26 \u0026lt;/div\u0026gt; 27 \u0026lt;/div\u0026gt; 28\u0026lt;/div\u0026gt; 29 1import { NgModule } from 
\u0026#39;@angular/core\u0026#39;; 2import { RouterModule, Routes } from \u0026#39;@angular/router\u0026#39;; 3import { HomeComponent } from \u0026#39;./components/home/home.component\u0026#39;; 4import { LoginComponent } from \u0026#39;./components/login/login.component\u0026#39;; 5import { ChartComponent } from \u0026#39;./components/chart/chart.component\u0026#39;; 6import { AuthGuard } from \u0026#39;./shared/auth.guard\u0026#39;; 7 8const routes: Routes = [ 9 { path: \u0026#39;\u0026#39;, redirectTo: \u0026#39;home\u0026#39;, pathMatch: \u0026#39;full\u0026#39;, canActivate: [AuthGuard] }, 10 { path: \u0026#39;home\u0026#39;, component: HomeComponent, canActivate: [AuthGuard] }, 11 { path: \u0026#39;login\u0026#39;, component: LoginComponent }, 12 { path: \u0026#39;logout\u0026#39;, component: LoginComponent }, 13 { path: \u0026#39;charts\u0026#39;, component: ChartComponent, canActivate: [AuthGuard] }, 14]; 15 16@NgModule({ 17 imports: [RouterModule.forRoot(routes, { useHash: true })], 18 exports: [RouterModule] 19}) 20 21export class AppRoutingModule { 22} 1\u0026lt;div class=\u0026#34;content-container\u0026#34;\u0026gt; 2 \u0026lt;div class=\u0026#34;content-area\u0026#34;\u0026gt; 3 4 \u0026lt;div class=\u0026#34;clr-row\u0026#34;\u0026gt; 5 6 \u0026lt;div class=\u0026#34;clr-col-12\u0026#34;\u0026gt; 7 \u0026lt;div class=\u0026#34;alert-section\u0026#34;\u0026gt; 8 \u0026lt;app-alert\u0026gt;\u0026lt;/app-alert\u0026gt; 9 \u0026lt;/div\u0026gt; 10 11 \u0026lt;p style=\u0026#34;text-align: center\u0026#34;\u0026gt; 12 \u0026lt;!-- interpolation \u0026amp; pipe --\u0026gt; 13 Server Time: {{currentTime | date:\u0026#39;dd-MM-yyyy\u0026#39; }} 14 \u0026lt;/p\u0026gt; 15 16 \u0026lt;h2 style=\u0026#34;text-align: center\u0026#34;\u0026gt;Customers\u0026lt;/h2\u0026gt; 17 18 \u0026lt;clr-datagrid\u0026gt; 19 \u0026lt;clr-dg-placeholder class=\u0026#34;content-center\u0026#34;\u0026gt;No Customers!\u0026lt;/clr-dg-placeholder\u0026gt; 20 \u0026lt;clr-dg-column 
[clrDgField]=\u0026#34;\u0026#39;id\u0026#39;\u0026#34;\u0026gt;ID\u0026lt;/clr-dg-column\u0026gt; 21 \u0026lt;clr-dg-column [clrDgField]=\u0026#34;\u0026#39;firstName\u0026#39;\u0026#34;\u0026gt;First Name\u0026lt;/clr-dg-column\u0026gt; 22 \u0026lt;clr-dg-column [clrDgField]=\u0026#34;\u0026#39;lastName\u0026#39;\u0026#34;\u0026gt;Last Name\u0026lt;/clr-dg-column\u0026gt; 23 \u0026lt;clr-dg-column [clrDgField]=\u0026#34;\u0026#39;city\u0026#39;\u0026#34;\u0026gt;City\u0026lt;/clr-dg-column\u0026gt; 24 \u0026lt;clr-dg-column\u0026gt;Action\u0026lt;/clr-dg-column\u0026gt; 25 \u0026lt;!-- structural directive --\u0026gt; 26 \u0026lt;clr-dg-row clr-dg-row *clrDgItems=\u0026#34;let customer of customers\u0026#34;\u0026gt; 27 \u0026lt;clr-dg-cell\u0026gt;{{customer.id}}\u0026lt;/clr-dg-cell\u0026gt; 28 \u0026lt;clr-dg-cell\u0026gt;{{customer.firstName}}\u0026lt;/clr-dg-cell\u0026gt; 29 \u0026lt;clr-dg-cell\u0026gt;{{customer.lastName}}\u0026lt;/clr-dg-cell\u0026gt; 30 \u0026lt;clr-dg-cell\u0026gt;{{customer.city}}\u0026lt;/clr-dg-cell\u0026gt; 31 \u0026lt;clr-dg-cell\u0026gt; 32 \u0026lt;cds-icon shape=\u0026#34;trash\u0026#34; style=\u0026#34;cursor: pointer; color: blue\u0026#34; (click)=\u0026#34;deleteCustomer(customer)\u0026#34;\u0026gt; 33 \u0026lt;/cds-icon\u0026gt; 34 \u0026lt;/clr-dg-cell\u0026gt; 35 \u0026lt;/clr-dg-row\u0026gt; 36 \u0026lt;clr-dg-footer\u0026gt; 37 \u0026lt;clr-dg-pagination #pagination [clrDgPageSize]=\u0026#34;10\u0026#34;\u0026gt; 38 \u0026lt;clr-dg-page-size [clrPageSizeOptions]=\u0026#34;[10,20,50,100]\u0026#34;\u0026gt;Customers per page\u0026lt;/clr-dg-page-size\u0026gt; 39 {{pagination.firstItem + 1}} - {{pagination.lastItem + 1}} of {{pagination.totalItems}} customers 40 \u0026lt;/clr-dg-pagination\u0026gt; 41 \u0026lt;/clr-dg-footer\u0026gt; 42 \u0026lt;/clr-datagrid\u0026gt; 43 44 \u0026lt;div class=\u0026#34;clr-col-12\u0026#34;\u0026gt; 45 \u0026lt;!-- template driven form--\u0026gt; 46 \u0026lt;form class=\u0026#34;clr-form 
clr-form-horizontal\u0026#34; (ngSubmit)=\u0026#34;saveCustomer()\u0026#34;\u0026gt; 47 \u0026lt;div class=\u0026#34;clr-form-control\u0026#34;\u0026gt; 48 \u0026lt;label for=\u0026#34;firstName\u0026#34; class=\u0026#34;clr-control-label\u0026#34;\u0026gt;First Name\u0026lt;/label\u0026gt; 49 \u0026lt;div class=\u0026#34;clr-control-container\u0026#34;\u0026gt; 50 \u0026lt;div class=\u0026#34;clr-input-wrapper\u0026#34;\u0026gt; 51 \u0026lt;!-- two way data binding --\u0026gt; 52 \u0026lt;input type=\u0026#34;text\u0026#34; [(ngModel)]=\u0026#34;customer.firstName\u0026#34; id=\u0026#34;firstName\u0026#34; name=\u0026#34;firstName\u0026#34; 53 placeholder=\u0026#34;First Name\u0026#34; class=\u0026#34;clr-input\u0026#34;/\u0026gt; 54 \u0026lt;/div\u0026gt; 55 \u0026lt;/div\u0026gt; 56 \u0026lt;/div\u0026gt; 57 \u0026lt;div class=\u0026#34;clr-form-control\u0026#34;\u0026gt; 58 \u0026lt;label for=\u0026#34;lastName\u0026#34; class=\u0026#34;clr-control-label\u0026#34;\u0026gt;Last Name\u0026lt;/label\u0026gt; 59 \u0026lt;div class=\u0026#34;clr-control-container\u0026#34;\u0026gt; 60 \u0026lt;div class=\u0026#34;clr-input-wrapper\u0026#34;\u0026gt; 61 \u0026lt;input [(ngModel)]=\u0026#34;customer.lastName\u0026#34; type=\u0026#34;text\u0026#34; id=\u0026#34;lastName\u0026#34; name=\u0026#34;lastName\u0026#34; 62 placeholder=\u0026#34;Last Name\u0026#34; class=\u0026#34;clr-input\u0026#34;/\u0026gt; 63 \u0026lt;/div\u0026gt; 64 \u0026lt;/div\u0026gt; 65 \u0026lt;/div\u0026gt; 66 \u0026lt;div class=\u0026#34;clr-form-control\u0026#34;\u0026gt; 67 \u0026lt;div class=\u0026#34;clr-control-container\u0026#34;\u0026gt; 68 \u0026lt;!-- event binding --\u0026gt; 69 \u0026lt;button type=\u0026#34;submit\u0026#34; class=\u0026#34;btn btn-primary\u0026#34; [disabled]=\u0026#34;\u0026#34;\u0026gt;Save\u0026lt;/button\u0026gt; 70 \u0026lt;/div\u0026gt; 71 \u0026lt;/div\u0026gt; 72 \u0026lt;/form\u0026gt; 73 \u0026lt;/div\u0026gt; 74 75 \u0026lt;/div\u0026gt; 76 
\u0026lt;/div\u0026gt; 77 \u0026lt;/div\u0026gt; 78\u0026lt;/div\u0026gt; 1import {Component, OnInit, ViewChild} from \u0026#39;@angular/core\u0026#39;; 2import {Customer} from \u0026#39;../../models/customer\u0026#39;; 3import {RestService} from \u0026#39;../../services/rest.service\u0026#39;; 4import {Router} from \u0026#39;@angular/router\u0026#39;; 5import {ClarityIcons, trashIcon} from \u0026#39;@cds/core/icon\u0026#39;; 6import {AlertComponent} from \u0026#34;../alert/alert.component\u0026#34;; 7 8@Component({ 9 selector: \u0026#39;app-home\u0026#39;, 10 templateUrl: \u0026#39;./home.component.html\u0026#39;, 11 styleUrls: [] 12}) 13export class HomeComponent implements OnInit { 14 15 customers: Customer[] = []; 16 customer: Customer = new Customer(); 17 currentTime = \u0026#39;\u0026#39;; 18 // @ts-ignore 19 @ViewChild(AlertComponent, {static: true}) private alert: AlertComponent; 20 21 constructor(private restService: RestService, private router: Router) { 22 ClarityIcons.addIcons(trashIcon); 23 } 24 25 ngOnInit(): void { 26 this.getCustomers(); 27 } 28 29 getCustomers(): void { 30 this.customer = new Customer(); 31 this.restService.getTime().subscribe(data =\u0026gt; { 32 this.currentTime = data; 33 }); 34 this.restService.getCustomers().subscribe(data =\u0026gt; { 35 this.customers = data; 36 }); 37 } 38 39 saveCustomer(): void { 40 this.restService.saveCustomer(this.customer) 41 .subscribe(data =\u0026gt; { 42 this.alert.showSuccess(\u0026#39;Saved customer: \u0026#39; + this.customer.firstName); 43 this.getCustomers(); 44 }, error =\u0026gt; { 45 this.alert.showError(\u0026#39;Forbidden!\u0026#39;); 46 console.log(error); 47 }); 48 } 49 50 deleteCustomer(customer: Customer): void { 51 console.log(\u0026#39;delete: \u0026#39; + customer.id); 52 this.restService.deleteCustomer(customer.id) 53 .subscribe(data =\u0026gt; { 54 this.alert.showSuccess(\u0026#39;Deleted customer: \u0026#39; + customer.id); 55 this.getCustomers(); 56 }, error =\u0026gt; { 57 
this.alert.showError(\u0026#39;Forbidden!\u0026#39;); 58 console.log(error); 59 }); 60 } 61} For older versions of spring boot that dont redirect to index.html add this mapping to the controller.\n1import javax.servlet.http.HttpServletRequest; 2 3import org.springframework.stereotype.Controller; 4import org.springframework.web.bind.annotation.RequestMapping; 5 6@Controller 7public class IndexController { 8 9 @RequestMapping(value = {\u0026#34;/\u0026#34;, \u0026#34;/{x:[\\\\w\\\\-]+}\u0026#34;, \u0026#34;/{x:^(?!api$).*$}/**/{y:[\\\\w\\\\-]+}\u0026#34;}) 10 public String getIndex(HttpServletRequest request) { 11 return \u0026#34;/index.html\u0026#34;; 12 } 13} Setup 1# Project 88 2 3SpringBoot Web, JWT, Angular, Clarity, Authentication, Authorization, Postgres, Charts 4 5[https://gitorko.github.io/spring-boot-angular/](https://gitorko.github.io/spring-boot-angular/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14 15node --version 16v16.16.0 17 18yarn --version 191.22.18 20``` 21 22### Postgres DB 23 24``` 25docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:9.6.10 26docker ps 27docker exec -it pg-container psql -U postgres -W postgres 28CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 29CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 30grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 31 32docker stop pg-container 33docker start pg-container 34``` 35 36### Dev 37 38To run the backend in dev mode. 39 40```bash 41./gradlew clean build 42./gradlew bootRun 43``` 44 45To Run UI in dev mode 46 47```bash 48cd ui 49yarn install 50yarn build 51yarn start 52``` 53 54Open [http://localhost:3000](http://localhost:3000) 55 56### Prod 57 58To run as a single jar, both UI and backend are bundled to single uber jar. 
59 60```bash 61./gradlew cleanBuild 62cd build/libs 63java -jar project88-1.0.0.jar 64``` 65 66Open [http://localhost:8080/](http://localhost:8080/) 67 68``` 69user: admin 70pwd: admin@123 71 72user: user 73pwd: user@123 74``` 75 76### Docker 77 78```bash 79./gradlew cleanBuild 80docker build -f docker/Dockerfile --force-rm -t project88:1.0.0 . 81docker images |grep project88 82docker tag project88:1.0.0 gitorko/project88:1.0.0 83docker push gitorko/project88:1.0.0 84docker-compose -f docker/docker-compose.yml up 85``` Testing 1curl --location --request POST \u0026#39;http://localhost:8080/api/auth/login\u0026#39; \\ 2--header \u0026#39;Content-Type: application/json\u0026#39; \\ 3--data-raw \u0026#39;{ 4 \u0026#34;username\u0026#34;: \u0026#34;admin\u0026#34;, 5 \u0026#34;password\u0026#34;: \u0026#34;admin@123\u0026#34; 6}\u0026#39; 1curl --location --request GET \u0026#39;http://localhost:8080/api/time\u0026#39; 1curl --location --request GET \u0026#39;http://localhost:8080/api/customer\u0026#39; \\ 2--header \u0026#39;Authorization: Bearer \u0026lt;TOKEN\u0026gt;\u0026#39; References https://clarity.design/\nhttps://www.chartjs.org/\n","link":"https://gitorko.github.io/post/spring-angular/","section":"post","tags":["spring","spring-boot","angular","chart.js","jpa","clarity","jwt"],"title":"Spring - Angular"},{"body":"","link":"https://gitorko.github.io/tags/hsqldb/","section":"tags","tags":null,"title":"Hsqldb"},{"body":"Spring Boot MVC application with Thymeleaf template \u0026amp; basic spring security support, uses bootstrap for CSS and chart.js for rendering charts. 
Creates uber jar to deploy.\nGithub: https://github.com/gitorko/project79\nQuick Overview To deploy the application in a single command, clone the project, make sure no conflicting docker containers or ports are running and then run\n1git clone https://github.com/gitorko/project79 2cd project79 3docker-compose -f docker/docker-compose.yml up Open http://localhost:8080/\nFeatures A Spring Web MVC application that renders thymeleaf templates as HTML. Supports basic integration with spring security and provides login logout support. Uses Spring Data to persist data into the HSQL db. A file based HSQL server db is used so that data persists across restarts. This can easily be changed to in-memory HSQL db. Spring dev tools allow seamless reload on any changes for html and java files so you can view the changes in the browser as soon as you edit them.\nSupports basic login via spring security Bootstrap 5 Login screen CRUD UI for adding and removing customer HSQL db Spring JPA Thymeleaf template Chart.js charts for bar,pie,stack charts with data from rest api Implementation Design Code On Intellij to allow spring dev tools to reload on change you need to enable 'Update classes and resources' as shown below\nSpring MVC controller renders the HTML.\n1package com.demo.project79.controller; 2 3import java.security.Principal; 4import java.util.Date; 5 6import com.demo.project79.domain.Customer; 7import com.demo.project79.repo.CustomerRepository; 8import lombok.RequiredArgsConstructor; 9import lombok.extern.slf4j.Slf4j; 10import org.springframework.beans.factory.annotation.Autowired; 11import org.springframework.stereotype.Controller; 12import org.springframework.ui.Model; 13import org.springframework.web.bind.annotation.GetMapping; 14import org.springframework.web.bind.annotation.PathVariable; 15import org.springframework.web.bind.annotation.PostMapping; 16import org.springframework.web.bind.annotation.RequestParam; 17import 
org.springframework.web.servlet.mvc.support.RedirectAttributes; 18 19@Controller 20@Slf4j 21@RequiredArgsConstructor 22public class HomeController { 23 24 @Autowired 25 CustomerRepository customerRepo; 26 27 @GetMapping(value = \u0026#34;/\u0026#34;) 28 public String home(Model model) { 29 Iterable\u0026lt;Customer\u0026gt; customerLst = customerRepo.findAll(); 30 model.addAttribute(\u0026#34;customerLst\u0026#34;, customerLst); 31 model.addAttribute(\u0026#34;serverTime\u0026#34;, new Date()); 32 return \u0026#34;home\u0026#34;; 33 } 34 35 @PostMapping(value = \u0026#34;/save\u0026#34;) 36 public String customerSave(@RequestParam(value = \u0026#34;firstName\u0026#34;) String firstName, @RequestParam(value = \u0026#34;lastName\u0026#34;) String lastName, 37 Model model, RedirectAttributes redirAttrs, Principal principal) { 38 log.info(\u0026#34;Name: \u0026#34; + firstName); 39 Customer customer = new Customer(); 40 customer.setFirstName(firstName); 41 customer.setLastName(lastName); 42 customerRepo.save(customer); 43 redirAttrs.addFlashAttribute(\u0026#34;successMsg\u0026#34;, \u0026#34;Successfully added user by: \u0026#34; + principal.getName()); 44 return \u0026#34;redirect:/\u0026#34;; 45 } 46 47 @GetMapping(value = \u0026#34;/delete/{id}\u0026#34;) 48 public String customerSave(@PathVariable Long id, RedirectAttributes redirAttrs, Principal principal) { 49 log.info(\u0026#34;User {} deleted by {}\u0026#34;, id, principal.getName()); 50 customerRepo.deleteById(id); 51 redirAttrs.addFlashAttribute(\u0026#34;successMsg\u0026#34;, \u0026#34;Deleted user: \u0026#34; + id); 52 return \u0026#34;redirect:/\u0026#34;; 53 } 54 55 @GetMapping(value = \u0026#34;/charts\u0026#34;) 56 public String chartsHome(Model model) { 57 return \u0026#34;charts\u0026#34;; 58 } 59 60} Spring Security is configured for BASIC authentication\n1package com.demo.project79.config; 2 3import org.springframework.context.annotation.Bean; 4import 
org.springframework.context.annotation.Configuration; 5import org.springframework.security.config.annotation.web.builders.HttpSecurity; 6import org.springframework.security.config.annotation.web.configuration.EnableWebSecurity; 7import org.springframework.security.core.userdetails.User; 8import org.springframework.security.crypto.factory.PasswordEncoderFactories; 9import org.springframework.security.crypto.password.PasswordEncoder; 10import org.springframework.security.provisioning.InMemoryUserDetailsManager; 11import org.springframework.security.web.SecurityFilterChain; 12 13@Configuration 14@EnableWebSecurity 15public class WebSecurityConfig { 16 17 public static final String USER_ROLE = \u0026#34;ADMIN\u0026#34;; 18 public static final String USER_NAME = \u0026#34;admin\u0026#34;; 19 public static final String USER_PASSWORD = \u0026#34;admin@123\u0026#34;; 20 21 @Bean 22 SecurityFilterChain securityFilterChain(HttpSecurity http) throws Exception { 23 return http 24 .csrf(csrf -\u0026gt; csrf.disable()) 25 .authorizeHttpRequests(authorize -\u0026gt; authorize 26 .requestMatchers(\u0026#34;/\u0026#34;, \u0026#34;/home\u0026#34;, \u0026#34;/rest/**\u0026#34;).permitAll() 27 .requestMatchers(\u0026#34;/js/**\u0026#34;, \u0026#34;/css/**\u0026#34;, \u0026#34;/images/**\u0026#34;).permitAll() 28 .anyRequest().authenticated() 29 ) 30 .formLogin(form -\u0026gt; form 31 .loginPage(\u0026#34;/login\u0026#34;) 32 .permitAll() 33 ) 34 .logout(logout -\u0026gt; logout 35 .permitAll() 36 ).build(); 37 } 38 39 @Bean 40 public InMemoryUserDetailsManager userDetailsService() { 41 PasswordEncoder encoder = PasswordEncoderFactories.createDelegatingPasswordEncoder(); 42 return new InMemoryUserDetailsManager( 43 User.withUsername(USER_NAME) 44 .password(encoder.encode(USER_PASSWORD)) 45 .roles(USER_ROLE) 46 .build() 47 ); 48 } 49} chart.js is a library that provides various charts, the project renders charts and the data is fetched from Rest API.\n1\u0026lt;!doctype html\u0026gt; 
2\u0026lt;html lang=\u0026#34;en\u0026#34; xmlns:th=\u0026#34;https://www.thymeleaf.org\u0026#34;\u0026gt; 3\u0026lt;head\u0026gt; 4 \u0026lt;div th:replace=\u0026#34;fragments/general :: include-frag\u0026#34;/\u0026gt; 5 \u0026lt;script src=\u0026#34;https://cdn.jsdelivr.net/npm/chart.js\u0026#34;\u0026gt;\u0026lt;/script\u0026gt; 6 \u0026lt;script src=\u0026#34;js/charts.js\u0026#34;\u0026gt;\u0026lt;/script\u0026gt; 7\u0026lt;/head\u0026gt; 8\u0026lt;body\u0026gt; 9 10\u0026lt;div th:replace=\u0026#34;fragments/general :: menu-frag\u0026#34;/\u0026gt; 11 12\u0026lt;div class=\u0026#34;container\u0026#34;\u0026gt; 13 \u0026lt;div th:replace=\u0026#34;fragments/general :: flash-message-frag\u0026#34;/\u0026gt; 14 \u0026lt;br/\u0026gt; 15 \u0026lt;br/\u0026gt; 16 \u0026lt;div class=\u0026#34;row\u0026#34;\u0026gt; 17 \u0026lt;div class=\u0026#34;col\u0026#34;\u0026gt; 18 \u0026lt;canvas id=\u0026#34;piechartContainer\u0026#34; width=\u0026#34;200\u0026#34; height=\u0026#34;300\u0026#34;\u0026gt;\u0026lt;/canvas\u0026gt; 19 \u0026lt;/div\u0026gt; 20 \u0026lt;div class=\u0026#34;col\u0026#34;\u0026gt; 21 \u0026lt;canvas id=\u0026#34;barchartContainer\u0026#34; width=\u0026#34;200\u0026#34; height=\u0026#34;300\u0026#34;\u0026gt;\u0026lt;/canvas\u0026gt; 22 \u0026lt;/div\u0026gt; 23 \u0026lt;/div\u0026gt; 24 \u0026lt;br/\u0026gt; 25 \u0026lt;br/\u0026gt; 26 \u0026lt;div class=\u0026#34;row\u0026#34;\u0026gt; 27 \u0026lt;div class=\u0026#34;col\u0026#34;\u0026gt; 28 \u0026lt;canvas id=\u0026#34;linechartContainer\u0026#34; width=\u0026#34;200\u0026#34; height=\u0026#34;300\u0026#34;\u0026gt;\u0026lt;/canvas\u0026gt; 29 \u0026lt;/div\u0026gt; 30 \u0026lt;div class=\u0026#34;col\u0026#34;\u0026gt; 31 \u0026lt;canvas id=\u0026#34;stackchartContainer\u0026#34; width=\u0026#34;200\u0026#34; height=\u0026#34;300\u0026#34;\u0026gt;\u0026lt;/canvas\u0026gt; 32 \u0026lt;/div\u0026gt; 33 \u0026lt;/div\u0026gt; 34\u0026lt;/div\u0026gt; 35 36\u0026lt;/body\u0026gt; 
37\u0026lt;/html\u0026gt; 1$(function() { 2 3 var backgroundColors = [ 4 \u0026#39;rgba(255, 99, 132, 0.2)\u0026#39;, 5 \u0026#39;rgba(255, 159, 64, 0.2)\u0026#39;, 6 \u0026#39;rgba(255, 205, 86, 0.2)\u0026#39;, 7 \u0026#39;rgba(75, 192, 192, 0.2)\u0026#39;, 8 \u0026#39;rgba(54, 162, 235, 0.2)\u0026#39;, 9 \u0026#39;rgba(153, 102, 255, 0.2)\u0026#39;, 10 \u0026#39;rgba(201, 203, 207, 0.2)\u0026#39; 11 ]; 12 13 var borderColors = [ 14 \u0026#39;rgb(255, 99, 132)\u0026#39;, 15 \u0026#39;rgb(255, 159, 64)\u0026#39;, 16 \u0026#39;rgb(255, 205, 86)\u0026#39;, 17 \u0026#39;rgb(75, 192, 192)\u0026#39;, 18 \u0026#39;rgb(54, 162, 235)\u0026#39;, 19 \u0026#39;rgb(153, 102, 255)\u0026#39;, 20 \u0026#39;rgb(201, 203, 207)\u0026#39; 21 ]; 22 23 $.getJSON(\u0026#34;/rest/pie-data\u0026#34;, function(json) { 24 new Chart(document.getElementById(\u0026#34;piechartContainer\u0026#34;), { 25 type: \u0026#39;pie\u0026#39;, 26 data: { 27 labels: json[0], 28 datasets: [{ 29 backgroundColor: backgroundColors, 30 borderColor: borderColors, 31 borderWidth: 1, 32 hoverOffset: 4, 33 data: json[1] 34 }] 35 }, 36 options: { 37 title: { 38 display: true, 39 text: \u0026#39;Pie Chart\u0026#39; 40 }, 41 responsive: true, 42 maintainAspectRatio: false, 43 scales: { 44 yAxes: [{ 45 ticks: { 46 beginAtZero:true 47 } 48 }] 49 } 50 } 51 }); 52 }); 53 54 $.getJSON(\u0026#34;/rest/pie-data\u0026#34;, function(json) { 55 new Chart(document.getElementById(\u0026#34;barchartContainer\u0026#34;), { 56 type: \u0026#39;bar\u0026#39;, 57 data: { 58 labels: json[0], 59 datasets: [{ 60 label: \u0026#39;My First Dataset\u0026#39;, 61 backgroundColor: backgroundColors, 62 borderColor: borderColors, 63 borderWidth: 1, 64 hoverOffset: 4, 65 data: json[1] 66 }] 67 }, 68 options: { 69 title: { 70 display: true, 71 text: \u0026#39;Bar Chart\u0026#39; 72 }, 73 responsive: true, 74 maintainAspectRatio: false, 75 scales: { 76 yAxes: [{ 77 ticks: { 78 beginAtZero:true 79 } 80 }] 81 } 82 } 83 }); 84 }); 85 86 
$.getJSON(\u0026#34;/rest/pie-data\u0026#34;, function(json) { 87 new Chart(document.getElementById(\u0026#34;linechartContainer\u0026#34;), { 88 type: \u0026#39;line\u0026#39;, 89 data: { 90 labels: json[0], 91 datasets: [{ 92 label: \u0026#39;My First Dataset\u0026#39;, 93 backgroundColor: backgroundColors, 94 borderColor: borderColors, 95 fill: false, 96 borderWidth: 1, 97 hoverOffset: 4, 98 data: json[1] 99 }] 100 }, 101 options: { 102 title: { 103 display: true, 104 text: \u0026#39;Line Chart\u0026#39; 105 }, 106 responsive: true, 107 maintainAspectRatio: false, 108 scales: { 109 yAxes: [{ 110 ticks: { 111 beginAtZero:true 112 } 113 }] 114 } 115 } 116 }); 117 }); 118 119 $.getJSON(\u0026#34;/rest/column-data\u0026#34;, function(json) { 120 new Chart(document.getElementById(\u0026#34;stackchartContainer\u0026#34;), { 121 type: \u0026#39;bar\u0026#39;, 122 data: { 123 labels: json[0][\u0026#34;data\u0026#34;], 124 datasets: [{ 125 label: json[1][\u0026#34;name\u0026#34;], 126 backgroundColor: backgroundColors[0], 127 borderColor: borderColors[0], 128 fill: false, 129 borderWidth: 1, 130 hoverOffset: 4, 131 data: json[1][\u0026#34;data\u0026#34;] 132 },{ 133 label: json[2][\u0026#34;name\u0026#34;], 134 backgroundColor: backgroundColors[1], 135 borderColor: borderColors[1], 136 fill: false, 137 borderWidth: 1, 138 hoverOffset: 4, 139 data: json[2][\u0026#34;data\u0026#34;] 140 },{ 141 label: json[3][\u0026#34;name\u0026#34;], 142 backgroundColor: backgroundColors[2], 143 borderColor: borderColors[2], 144 fill: false, 145 borderWidth: 1, 146 hoverOffset: 4, 147 data: json[3][\u0026#34;data\u0026#34;] 148 }] 149 }, 150 options: { 151 title: { 152 display: true, 153 text: \u0026#39;Stack Chart\u0026#39; 154 }, 155 responsive: true, 156 maintainAspectRatio: false, 157 scales: { 158 x: { 159 stacked: true, 160 }, 161 y: { 162 stacked: true 163 } 164 } 165 } 166 }); 167 }); 168 169}); 1\u0026lt;!doctype html\u0026gt; 2\u0026lt;html lang=\u0026#34;en\u0026#34; 
xmlns:th=\u0026#34;https://www.thymeleaf.org\u0026#34;\u0026gt; 3\u0026lt;head\u0026gt; 4 \u0026lt;div th:replace=\u0026#34;fragments/general :: include-frag\u0026#34;/\u0026gt; 5 \u0026lt;script src=\u0026#34;js/home.js\u0026#34;\u0026gt;\u0026lt;/script\u0026gt; 6\u0026lt;/head\u0026gt; 7\u0026lt;body\u0026gt; 8 9\u0026lt;div th:replace=\u0026#34;fragments/general :: menu-frag\u0026#34;/\u0026gt; 10 11\u0026lt;div class=\u0026#34;container\u0026#34;\u0026gt; 12 \u0026lt;div th:replace=\u0026#34;fragments/general :: flash-message-frag\u0026#34;/\u0026gt; 13 14 \u0026lt;form method=\u0026#34;post\u0026#34; th:action=\u0026#34;@{/save}\u0026#34; role=\u0026#34;form\u0026#34; class=\u0026#34;form-horizontal\u0026#34;\u0026gt; 15 \u0026lt;div class=\u0026#34;row\u0026#34;\u0026gt; 16 \u0026lt;div class=\u0026#34;col text-center\u0026#34;\u0026gt; 17 \u0026lt;p class=\u0026#34;text-end\u0026#34; th:inline=\u0026#34;text\u0026#34;\u0026gt;Current Date : [[${serverTime}]]\u0026lt;/p\u0026gt; 18 \u0026lt;/div\u0026gt; 19 \u0026lt;/div\u0026gt; 20 \u0026lt;br/\u0026gt; 21 22 \u0026lt;div class=\u0026#34;row\u0026#34;\u0026gt; 23 \u0026lt;div class=\u0026#34;col text-center\u0026#34;\u0026gt; 24 \u0026lt;h2\u0026gt;Customers\u0026lt;/h2\u0026gt; 25 \u0026lt;/div\u0026gt; 26 \u0026lt;/div\u0026gt; 27 \u0026lt;br/\u0026gt; 28 29 \u0026lt;div class=\u0026#34;row\u0026#34;\u0026gt; 30 \u0026lt;div class=\u0026#34;col-4\u0026#34;\u0026gt; 31 \u0026lt;form\u0026gt; 32 \u0026lt;div class=\u0026#34;mb-3\u0026#34;\u0026gt; 33 \u0026lt;label for=\u0026#34;firstName\u0026#34; class=\u0026#34;form-label\u0026#34;\u0026gt;First Name\u0026lt;/label\u0026gt; 34 \u0026lt;input type=\u0026#34;text\u0026#34; name=\u0026#34;firstName\u0026#34; class=\u0026#34;form-control\u0026#34; id=\u0026#34;firstName\u0026#34; aria-describedby=\u0026#34;nameHelp\u0026#34;\u0026gt; 35 \u0026lt;div id=\u0026#34;nameHelp\u0026#34; class=\u0026#34;form-text\u0026#34;\u0026gt;Enter your 
name!\u0026lt;/div\u0026gt; 36 \u0026lt;/div\u0026gt; 37 \u0026lt;div class=\u0026#34;mb-3\u0026#34;\u0026gt; 38 \u0026lt;label for=\u0026#34;lastName\u0026#34; class=\u0026#34;form-label\u0026#34;\u0026gt;Last Name\u0026lt;/label\u0026gt; 39 \u0026lt;input type=\u0026#34;text\u0026#34; name=\u0026#34;lastName\u0026#34; class=\u0026#34;form-control\u0026#34; id=\u0026#34;lastName\u0026#34;\u0026gt; 40 \u0026lt;/div\u0026gt; 41 \u0026lt;button type=\u0026#34;submit\u0026#34; class=\u0026#34;btn btn-primary\u0026#34;\u0026gt;Submit\u0026lt;/button\u0026gt; 42 \u0026lt;/form\u0026gt; 43 \u0026lt;/div\u0026gt; 44 \u0026lt;div class=\u0026#34;col-8\u0026#34;\u0026gt; 45 \u0026lt;table id=\u0026#34;customer-table\u0026#34; class=\u0026#34;table table-striped table-bordered\u0026#34; style=\u0026#34;width:100%\u0026#34;\u0026gt; 46 \u0026lt;thead\u0026gt; 47 \u0026lt;tr\u0026gt; 48 \u0026lt;th\u0026gt; 49 First Name 50 \u0026lt;/th\u0026gt; 51 \u0026lt;th\u0026gt; 52 Last Name 53 \u0026lt;/th\u0026gt; 54 \u0026lt;th\u0026gt; 55 Action 56 \u0026lt;/th\u0026gt; 57 \u0026lt;/tr\u0026gt; 58 \u0026lt;/thead\u0026gt; 59 \u0026lt;tbody\u0026gt; 60 \u0026lt;tr th:each=\u0026#34;customer: ${customerLst}\u0026#34;\u0026gt; 61 \u0026lt;td th:inline=\u0026#34;text\u0026#34;\u0026gt;[[${customer.firstName}]]\u0026lt;/td\u0026gt; 62 \u0026lt;td th:inline=\u0026#34;text\u0026#34;\u0026gt;[[${customer.lastName}]]\u0026lt;/td\u0026gt; 63 \u0026lt;td\u0026gt;\u0026lt;a th:href=\u0026#34;@{\u0026#39;/delete/\u0026#39; + ${customer.id}}\u0026#34;\u0026lt;i class=\u0026#34;bi bi-trash\u0026#34;\u0026gt;\u0026lt;/i\u0026gt;\u0026lt;/a\u0026gt;\u0026lt;/td\u0026gt; 64 \u0026lt;/tr\u0026gt; 65 \u0026lt;/tbody\u0026gt; 66 67 \u0026lt;/table\u0026gt; 68 \u0026lt;/div\u0026gt; 69 \u0026lt;/div\u0026gt; 70 71 \u0026lt;/form\u0026gt; 72\u0026lt;/div\u0026gt; 73 74\u0026lt;/body\u0026gt; 75\u0026lt;/html\u0026gt; 1\u0026lt;!doctype html\u0026gt; 2\u0026lt;html lang=\u0026#34;en\u0026#34; 
xmlns:th=\u0026#34;https://www.thymeleaf.org\u0026#34;\u0026gt; 3\u0026lt;head\u0026gt; 4 \u0026lt;div th:replace=\u0026#34;fragments/general :: include-frag\u0026#34;/\u0026gt; 5\u0026lt;/head\u0026gt; 6\u0026lt;body\u0026gt; 7 8\u0026lt;div th:replace=\u0026#34;fragments/general :: login-menu-frag\u0026#34;/\u0026gt; 9 10\u0026lt;div class=\u0026#34;container\u0026#34;\u0026gt; 11 \u0026lt;form method=\u0026#34;post\u0026#34; th:action=\u0026#34;@{/login}\u0026#34; role=\u0026#34;form\u0026#34; class=\u0026#34;form-horizontal\u0026#34; style=\u0026#34;max-width: 400px; margin: auto\u0026#34;\u0026gt; 12 \u0026lt;br/\u0026gt; 13 \u0026lt;br/\u0026gt; 14 \u0026lt;h2\u0026gt;Login\u0026lt;/h2\u0026gt; 15 16 \u0026lt;div class=\u0026#34;mb-3\u0026#34;\u0026gt; 17 \u0026lt;label for=\u0026#34;exampleInputEmail1\u0026#34; class=\u0026#34;form-label\u0026#34;\u0026gt;Username\u0026lt;/label\u0026gt; 18 \u0026lt;input type=\u0026#34;text\u0026#34; name=\u0026#34;username\u0026#34; class=\u0026#34;form-control\u0026#34; id=\u0026#34;exampleInputEmail1\u0026#34; aria-describedby=\u0026#34;usernameHelp\u0026#34;\u0026gt; 19 \u0026lt;div id=\u0026#34;usernameHelp\u0026#34; class=\u0026#34;form-text\u0026#34;\u0026gt;Enter ldap username.\u0026lt;/div\u0026gt; 20 \u0026lt;/div\u0026gt; 21 \u0026lt;div class=\u0026#34;mb-3\u0026#34;\u0026gt; 22 \u0026lt;label for=\u0026#34;exampleInputPassword1\u0026#34; class=\u0026#34;form-label\u0026#34;\u0026gt;Password\u0026lt;/label\u0026gt; 23 \u0026lt;input type=\u0026#34;password\u0026#34; name=\u0026#34;password\u0026#34; class=\u0026#34;form-control\u0026#34; id=\u0026#34;exampleInputPassword1\u0026#34;\u0026gt; 24 \u0026lt;/div\u0026gt; 25 \u0026lt;button type=\u0026#34;submit\u0026#34; class=\u0026#34;btn btn-primary\u0026#34; style=\u0026#34;width:100%\u0026#34;\u0026gt;Submit\u0026lt;/button\u0026gt; 26 27 \u0026lt;br/\u0026gt; 28 \u0026lt;br/\u0026gt; 29 30 \u0026lt;div th:if=\u0026#34;${param.logout}\u0026#34; 
class=\u0026#34;alert alert-success\u0026#34; role=\u0026#34;alert\u0026#34;\u0026gt; 31 You have been logged out 32 \u0026lt;/div\u0026gt; 33 \u0026lt;div th:if=\u0026#34;${param.error}\u0026#34; class=\u0026#34;alert alert-danger\u0026#34; role=\u0026#34;alert\u0026#34;\u0026gt; 34 Invalid username and password! 35 \u0026lt;/div\u0026gt; 36 \u0026lt;/form\u0026gt; 37 38\u0026lt;/div\u0026gt; 39 40\u0026lt;/body\u0026gt; 41\u0026lt;/html\u0026gt; Setup 1# Project 79 2 3Spring Boot MVC Web project Thymeleaf, Login, Charts 4 5[https://gitorko.github.io/spring-boot-thymeleaf/](https://gitorko.github.io/spring-boot-thymeleaf/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14``` 15 16### Postgres DB 17 18``` 19docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:9.6.10 20docker ps 21docker exec -it pg-container psql -U postgres -W postgres 22CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 23CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 24grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 25 26docker stop pg-container 27docker start pg-container 28``` 29 30### Dev 31 32To run the code. 33 34```bash 35./gradlew clean build 36./gradlew bootRun 37``` 38 39### Prod 40 41To build the uber jar \u0026amp; run the jar. 42 43```bash 44./gradlew clean build 45cd build/libs 46java -jar project79-1.0.0.jar 47``` 48 49Open [http://localhost:8080/](http://localhost:8080/) 50 51``` 52user: admin 53pwd: admin@123 54``` 55 56### Docker 57 58```bash 59./gradlew clean build 60docker build -f docker/Dockerfile --force-rm -t project79:1.0.0 . 
61docker images |grep project79 62docker tag project79:1.0.0 gitorko/project79:1.0.0 63docker push gitorko/project79:1.0.0 64docker-compose -f docker/docker-compose.yml up 65``` 66 References https://getbootstrap.com/\nhttps://www.chartjs.org/\n","link":"https://gitorko.github.io/post/spring-thymeleaf/","section":"post","tags":["spring-boot","spring-security","bootstrap","hsqldb","JPA","chart.js","thymeleaf"],"title":"Spring - Thymeleaf"},{"body":"","link":"https://gitorko.github.io/tags/thymeleaf/","section":"tags","tags":null,"title":"Thymeleaf"},{"body":"","link":"https://gitorko.github.io/categories/web/","section":"categories","tags":null,"title":"Web"},{"body":"","link":"https://gitorko.github.io/tags/code-coverage/","section":"tags","tags":null,"title":"Code-Coverage"},{"body":"","link":"https://gitorko.github.io/tags/hot-swap/","section":"tags","tags":null,"title":"Hot-Swap"},{"body":"","link":"https://gitorko.github.io/tags/openapi/","section":"tags","tags":null,"title":"Openapi"},{"body":"","link":"https://gitorko.github.io/tags/vscode/","section":"tags","tags":null,"title":"Vscode"},{"body":"","link":"https://gitorko.github.io/categories/vscode/","section":"categories","tags":null,"title":"VSCode"},{"body":"VSCode is free, open source IDE. 
We look at some of the tips and tricks to work with VSCode for java developers\nGithub: https://github.com/gitorko/project61\nVSCode Feature 1: Explore Git Clone \u0026amp; Spring Init commands You can use the command palette (Ctrl+Shift+P) to clone repositories, or create new projects using start.spring.io integration.\nFeature 2: Explore Java language support Use language support to avoid typing main: 'public static void main' or sysout: 'System.out.println'\nFeature 3: Explore the Gradle Exentison View the gradle tasks\nGradle Extension Pack\nFeature 4: Hide files you dont wish to view Add this to settings.json\n1\u0026#34;files.exclude\u0026#34;: { 2 \u0026#34;**/.classpath\u0026#34;: true, 3 \u0026#34;**/.DS_Store\u0026#34;: true, 4 \u0026#34;**/.factorypath\u0026#34;: true, 5 \u0026#34;**/.git\u0026#34;: true, 6 \u0026#34;**/.gitignore\u0026#34;: true, 7 \u0026#34;**/.gradle\u0026#34;: true, 8 \u0026#34;**/.hg\u0026#34;: true, 9 \u0026#34;**/.mvn\u0026#34;: true, 10 \u0026#34;**/.project\u0026#34;: true, 11 \u0026#34;**/.settings\u0026#34;: true, 12 \u0026#34;**/.sts4-cache\u0026#34;: true, 13 \u0026#34;**/.svn\u0026#34;: true, 14 \u0026#34;**/.vscode\u0026#34;: true, 15 \u0026#34;**/.idea\u0026#34;: true, 16 \u0026#34;**/out\u0026#34;: true, 17 \u0026#34;**/bin\u0026#34;: true, 18 \u0026#34;**/build\u0026#34;: true, 19 \u0026#34;**/CVS\u0026#34;: true, 20 \u0026#34;**/gradle\u0026#34;: true, 21 \u0026#34;**/target\u0026#34;: true, 22 \u0026#34;**/.attach_pid*\u0026#34;: true, 23 \u0026#34;**/logs\u0026#34;: true 24} Feature 5: Replace tabs with white spaces Add this to settings.json\n1\u0026#34;editor.renderWhitespace\u0026#34;: \u0026#34;all\u0026#34;, 2\u0026#34;editor.insertSpaces\u0026#34;: true, Feature 6: Increase page length to 120 Add this to settings.json to specify line length.\n1\u0026#34;editor.rulers\u0026#34;: [ 2 120 3] Feature 7: Decide on import order Add this to settings.json to specify the import 
order.\n1\u0026#34;java.completion.importOrder\u0026#34;: [ 2 \u0026#34;java\u0026#34;, 3 \u0026#34;javax\u0026#34;, 4 \u0026#34;org\u0026#34;, 5 \u0026#34;com\u0026#34; 6 ], Use Right click Source Action-\u0026gt;Organize Imports or (Alt+Shift+O)\nFeature 8: Enable eclipse formatting Enable specific formatter.\nFormatter settings\nFormatter xml\n1 \u0026#34;java.format.settings.url\u0026#34; :\u0026#34;file:///Users/home/dev_code-style_formatter.xml\u0026#34;, Use (Ctrl+Shift+I) to format\nFeature 9: Install Lombok plugin Avoid writing boilerplate code with lombok.\nLombok Annotations Support for VS Code\nFeature 10: Add license info to each file This will add a license header to the file.\nlicenser\nAdd this to settings.json\n1\u0026#34;licenser.customHeader\u0026#34;: \u0026#34;Copyright (c) 2021 Company, Inc. All Rights Reserved.\u0026#34;, 2\u0026#34;licenser.customTermsAndConditions\u0026#34;: \u0026#34;\u0026#34;, 3\u0026#34;licenser.license\u0026#34;: \u0026#34;Custom\u0026#34;, 4\u0026#34;licenser.useSingleLineStyle\u0026#34;: false, 5\u0026#34;licenser.author\u0026#34;: \u0026#34;Company\u0026#34;, Use command palette to insert license\nFeature 11: Explore java dependency tree View the dependency tree of the project.\nFeature 12: Explore Git Check git blame inline and view git commits. View the git graph to visualize the tree.\nFeature 13: Explore unit test support \u0026amp; debug unit tests Run the unit tests and view the report.\nFeature 14: Explore checkstyle support View the inline highlight feature. 
Make the settings change in the workspace instead of global user settings file so that this applies only to the specific project.\nCheckstyle for Java\n1\u0026#34;java.checkstyle.configuration\u0026#34;: \u0026#34;${workspaceFolder}/config/checkstyle/checkstyle.xml\u0026#34; Feature 15: Explore inline code coverage You need xml report enabled for this to work, check build.gradle, after the build the jacocoTestReport.xml is generated that is read by the coverage extension to highlight lines of code not covered by unit tests.\nCoverage Gutters\nIf the coverage file name is different then change the settings.json\n1\u0026#34;coverage-gutters.xmlname\u0026#34;: \u0026#34;jacocoTestReport.xml\u0026#34;, Feature 16: Explore Debugging and Hot Code Replacement/Hot Swap Dock the debugger tool bar.\n1\u0026#34;debug.toolBarLocation\u0026#34;: \u0026#34;docked\u0026#34; 1curl --location --request GET \u0026#39;http://localhost:9090/api/age/10-10-2020\u0026#39; --header \u0026#39;Content-Type: application/json\u0026#39; To enable hot code replace set the following properties, for spring boot projects with dev tools the reload is automatic, if dev tools is not present in the project then you can use Hot code replacement (HCR), which doesn’t require a restart, is a fast debugging technique in which the Java debugger transmits new class files over the debugging channel to the JVM. 
Make sure 'java.autobuild.enabled' is enabled.\n1\u0026#34;java.debug.settings.hotCodeReplace\u0026#34;: \u0026#34;auto\u0026#34;, 2\u0026#34;java.autobuild.enabled\u0026#34; : true Feature 17: Explore Spring Boot Support Start or debug spring boot application\nGet spring property support\nFeature 18: Create shortcut to key bindings to build project For gradle projects instead of running ./gradlew build each time in terminal you can map it to a task and give a keyboard shortcut.\nAdd this to the tasks.json, everytime you run a task called 'run' it will build the project.\n1{ 2 \u0026#34;version\u0026#34;: \u0026#34;2.0.0\u0026#34;, 3 \u0026#34;tasks\u0026#34;: [ 4 { 5 \u0026#34;label\u0026#34;: \u0026#34;build\u0026#34;, 6 \u0026#34;type\u0026#34;: \u0026#34;shell\u0026#34;, 7 \u0026#34;command\u0026#34;: \u0026#34;./gradlew clean build\u0026#34;, 8 \u0026#34;group\u0026#34;: \u0026#34;none\u0026#34; 9 } 10 ] 11} Now lets create a shortcut goto \u0026quot;Keyboard Shortcuts\u0026quot; and click on '{}' icon. Add this to keybindings.json, now press F6 to build the project\n1[ 2 { 3 \u0026#34;key\u0026#34;: \u0026#34;f6\u0026#34;, 4 \u0026#34;command\u0026#34;: \u0026#34;workbench.action.tasks.runTask\u0026#34;, 5 \u0026#34;args\u0026#34; : \u0026#34;build\u0026#34; 6 } 7] Feature 19: Reading user input To take user input from command line you need to change shell type in launch.json config to integratedTerminal\n1{ 2 \u0026#34;type\u0026#34;: \u0026#34;java\u0026#34;, 3 \u0026#34;name\u0026#34;: \u0026#34;CodeLens (Launch) - Main\u0026#34;, 4 \u0026#34;request\u0026#34;: \u0026#34;launch\u0026#34;, 5 \u0026#34;mainClass\u0026#34;: \u0026#34;com.demo.project61.Application\u0026#34;, 6 \u0026#34;projectName\u0026#34;: \u0026#34;project61\u0026#34;, 7 \u0026#34;console\u0026#34;: \u0026#34;integratedTerminal\u0026#34; 8} Feature 20: Explore Docker Build the docker image\nRun the docker image\nTag the docker image and push it to public docker hub registry. 
You need to run docker login before pushing the image.\n1docker login Push the image\nFeature 21: Explore Kubernetes Deploy to kubernetes cluster\nView the deployments\nFeature 22: Explore Rest Client Explore Reset client Thunder Client\nFeature 23: Sync the settings Link the accounts to sync the settings\nFeature 24: Connect to DB and query Query a database\nFeature 25: Use live share Share your workspace\nFeature 26: Explore Open API Create Open API spec file and test it\nFeature 27: Shortcuts Goto Implementation - (Ctrl + F12) Goto Terminal - (Ctrl + ~ ) Quick Fix - (Ctrl + . )\nProblems Often times workspace gets corrupted so I delete the storage in %APPDATA%\\Code\\User\\workspaceStorage and restart the IDE to get things back in order. Clean the workspace directory\nWindows : %APPDATA%\\Code[ - Variant]\\User\\workspaceStorage\nMacOS : $HOME/Library/Application Support/Code[ - Variant]/User/workspaceStorage/ Linux : $HOME/.config/Code[ - Variant]/User/workspaceStorage/\nAnother problem often seen is when multiple project exist on workspace but if one of them fails to build then all the projects in the workspace wont work. So for now keep one workspace to one project mapping.\nMaven Execution If you need to execute maven project from command line, You need to add org.codehaus.mojo.exec-maven-plugin in your pom.xml\n1\u0026lt;plugin\u0026gt; 2 \u0026lt;groupId\u0026gt;org.codehaus.mojo\u0026lt;/groupId\u0026gt; 3 \u0026lt;artifactId\u0026gt;exec-maven-plugin\u0026lt;/artifactId\u0026gt; 4 \u0026lt;version\u0026gt;1.6.0\u0026lt;/version\u0026gt; 5\u0026lt;/plugin\u0026gt; Then configure task by 'Ctrl+Shift+P' then 'Tasks: Configure task' and select the project. 
Edit the tasks.json\n1{ 2 \u0026#34;version\u0026#34;: \u0026#34;2.0.0\u0026#34;, 3 \u0026#34;tasks\u0026#34;: [ 4 { 5 \u0026#34;label\u0026#34;: \u0026#34;run\u0026#34;, 6 \u0026#34;type\u0026#34;: \u0026#34;shell\u0026#34;, 7 \u0026#34;command\u0026#34;: \u0026#34;mvn exec:java \u0026#39;-Dexec.mainClass=com.myproject.Main\u0026#39;\u0026#34;, 8 \u0026#34;group\u0026#34;: \u0026#34;none\u0026#34; 9 } 10 ] 11} Plugins recommended To export the plugins you use\n1code --list-extensions \u0026gt; extensions.list To install all plugins at one time\n1cat extensions.list |% { code --install-extension $_} extensions.list\n142Crunch.vscode-openapi 2Angular.ng-template 3CoenraadS.bracket-pair-colorizer-2 4DavidAnson.vscode-markdownlint 5dbaeumer.vscode-eslint 6DotJoshJohnson.xml 7eamodio.gitlens 8eg2.vscode-npm-script 9GabrielBB.vscode-lombok 10golang.go 11hashicorp.terraform 12humao.rest-client 13jim-moody.drools 14johnpapa.vscode-peacock 15mhutchie.git-graph 16ms-azuretools.vscode-docker 17ms-kubernetes-tools.vscode-kubernetes-tools 18ms-ossdata.vscode-postgresql 19ms-python.python 20ms-python.vscode-pylance 21ms-toolsai.jupyter 22ms-vscode-remote.remote-containers 23ms-vscode-remote.remote-ssh 24ms-vscode-remote.remote-ssh-edit 25ms-vscode-remote.remote-ssh-explorer 26ms-vscode-remote.remote-wsl 27ms-vscode-remote.vscode-remote-extensionpack 28ms-vscode.js-debug-nightly 29ms-vscode.vscode-typescript-next 30ms-vscode.vscode-typescript-tslint-plugin 31ms-vsliveshare.vsliveshare 32msjsdiag.debugger-for-chrome 33msjsdiag.vscode-react-native 34mtxr.sqltools 35mtxr.sqltools-driver-pg 36naco-siren.gradle-language 37Pivotal.vscode-spring-boot 38PKief.material-icon-theme 39rangav.vscode-thunder-client 40redhat.java 41redhat.vscode-commons 42redhat.vscode-xml 43redhat.vscode-yaml 44richardwillis.vscode-gradle 45richardwillis.vscode-gradle-extension-pack 46richardwillis.vscode-spotless-gradle 47ryanluker.vscode-coverage-gutters 48shengchen.vscode-checkstyle 
49VisualStudioExptTeam.vscodeintellicode 50vscjava.vscode-java-debug 51vscjava.vscode-java-dependency 52vscjava.vscode-java-pack 53vscjava.vscode-java-test 54vscjava.vscode-maven 55vscjava.vscode-spring-boot-dashboard 56vscjava.vscode-spring-initializr 57vscode-icons-team.vscode-icons 58xabikos.JavaScriptSnippets 59ymotongpoo.licenser 60zhuangtongfa.material-theme References VSCode\nJava in VSCode\nJava Tutorial with VS Code\nSpring Boot with VS Code\nJava Debugging and Testing\n","link":"https://gitorko.github.io/post/vscode-java/","section":"post","tags":["vscode","checkstyle","openapi","code-coverage","docker","kubernetes","hot-swap"],"title":"VSCode Java"},{"body":"Generate a pdf report using jasper reports\nGithub: https://github.com/gitorko/project70\nJasper Report JasperReports is an open source java reporting engine. It can generate different types of reports in this example we look at generating a pdf report with data passed from the java layer. To generate the jasper template you will need to download and install jasper studio. 
Jasper report also comes with a server but for this demo you dont need to install it.\nhttps://community.jaspersoft.com/download\nCode 1package com.demo.project70; 2 3import java.io.File; 4import java.io.FileOutputStream; 5import java.io.OutputStream; 6import java.util.ArrayList; 7import java.util.HashMap; 8import java.util.List; 9import java.util.Map; 10 11import lombok.AllArgsConstructor; 12import lombok.Data; 13import lombok.NoArgsConstructor; 14import net.sf.jasperreports.engine.JREmptyDataSource; 15import net.sf.jasperreports.engine.JasperCompileManager; 16import net.sf.jasperreports.engine.JasperExportManager; 17import net.sf.jasperreports.engine.JasperFillManager; 18import net.sf.jasperreports.engine.JasperPrint; 19import net.sf.jasperreports.engine.JasperReport; 20import net.sf.jasperreports.engine.data.JRBeanCollectionDataSource; 21import org.springframework.boot.CommandLineRunner; 22import org.springframework.boot.SpringApplication; 23import org.springframework.boot.autoconfigure.SpringBootApplication; 24import org.springframework.context.annotation.Bean; 25 26@SpringBootApplication 27public class Main { 28 29 static final String fileName = \u0026#34;src/main/resources/EmployeeReports.jrxml\u0026#34;; 30 static final String outFile = \u0026#34;EmployeeReports.pdf\u0026#34;; 31 32 public static void main(String[] args) { 33 SpringApplication.run(Main.class, args); 34 } 35 36 @Bean 37 public CommandLineRunner onStart() { 38 return (args) -\u0026gt; { 39 List\u0026lt;Employee\u0026gt; employeeList = new ArrayList\u0026lt;\u0026gt;(); 40 Map\u0026lt;String, Object\u0026gt; parameter = new HashMap\u0026lt;\u0026gt;(); 41 42 employeeList.add(new Employee(1, \u0026#34;Jack Ryan\u0026#34;, 100.0)); 43 employeeList.add(new Employee(2, \u0026#34;Cathy Mueller\u0026#34;, 130.0)); 44 employeeList.add(new Employee(3, \u0026#34;Matice\u0026#34;, 90.0)); 45 46 parameter.put(\u0026#34;employeeDataSource\u0026#34;, new JRBeanCollectionDataSource(employeeList)); 47 
parameter.put(\u0026#34;title\u0026#34;, \u0026#34;Employee Report\u0026#34;); 48 49 JasperReport jasperDesign = JasperCompileManager.compileReport(fileName); 50 JasperPrint jasperPrint = JasperFillManager.fillReport(jasperDesign, parameter, new JREmptyDataSource()); 51 52 File file = new File(outFile); 53 OutputStream outputSteam = new FileOutputStream(file); 54 JasperExportManager.exportReportToPdfStream(jasperPrint, outputSteam); 55 56 System.out.println(\u0026#34;Report Generated!\u0026#34;); 57 }; 58 } 59 60 @Data 61 @AllArgsConstructor 62 @NoArgsConstructor 63 public class Employee { 64 private int id; 65 private String name; 66 private Double salary; 67 } 68} 69 70 1\u0026lt;?xml version=\u0026#34;1.0\u0026#34; encoding=\u0026#34;UTF-8\u0026#34;?\u0026gt; 2\u0026lt;!-- Created with Jaspersoft Studio version 6.14.0.final using JasperReports Library version 6.14.0-2ab0d8625be255bf609c78e1181801213e51db8f --\u0026gt; 3\u0026lt;jasperReport xmlns=\u0026#34;http://jasperreports.sourceforge.net/jasperreports\u0026#34; xmlns:xsi=\u0026#34;http://www.w3.org/2001/XMLSchema-instance\u0026#34; xsi:schemaLocation=\u0026#34;http://jasperreports.sourceforge.net/jasperreports http://jasperreports.sourceforge.net/xsd/jasperreport.xsd\u0026#34; name=\u0026#34;EmployeeReports\u0026#34; pageWidth=\u0026#34;595\u0026#34; pageHeight=\u0026#34;842\u0026#34; columnWidth=\u0026#34;555\u0026#34; leftMargin=\u0026#34;20\u0026#34; rightMargin=\u0026#34;20\u0026#34; topMargin=\u0026#34;20\u0026#34; bottomMargin=\u0026#34;20\u0026#34; uuid=\u0026#34;78066d92-d5f8-4a86-bde2-9824be76fbdf\u0026#34;\u0026gt; 4 \u0026lt;property name=\u0026#34;com.jaspersoft.studio.data.defaultdataadapter\u0026#34; value=\u0026#34;One Empty Record\u0026#34;/\u0026gt; 5 \u0026lt;style name=\u0026#34;Table_TH\u0026#34; mode=\u0026#34;Opaque\u0026#34; backcolor=\u0026#34;#F0F8FF\u0026#34;\u0026gt; 6 \u0026lt;box\u0026gt; 7 \u0026lt;pen lineWidth=\u0026#34;0.5\u0026#34; 
lineColor=\u0026#34;#000000\u0026#34;/\u0026gt; 8 \u0026lt;topPen lineWidth=\u0026#34;0.5\u0026#34; lineColor=\u0026#34;#000000\u0026#34;/\u0026gt; 9 \u0026lt;leftPen lineWidth=\u0026#34;0.5\u0026#34; lineColor=\u0026#34;#000000\u0026#34;/\u0026gt; 10 \u0026lt;bottomPen lineWidth=\u0026#34;0.5\u0026#34; lineColor=\u0026#34;#000000\u0026#34;/\u0026gt; 11 \u0026lt;rightPen lineWidth=\u0026#34;0.5\u0026#34; lineColor=\u0026#34;#000000\u0026#34;/\u0026gt; 12 \u0026lt;/box\u0026gt; 13 \u0026lt;/style\u0026gt; 14 \u0026lt;style name=\u0026#34;Table_CH\u0026#34; mode=\u0026#34;Opaque\u0026#34; backcolor=\u0026#34;#BFE1FF\u0026#34;\u0026gt; 15 \u0026lt;box\u0026gt; 16 \u0026lt;pen lineWidth=\u0026#34;0.5\u0026#34; lineColor=\u0026#34;#000000\u0026#34;/\u0026gt; 17 \u0026lt;topPen lineWidth=\u0026#34;0.5\u0026#34; lineColor=\u0026#34;#000000\u0026#34;/\u0026gt; 18 \u0026lt;leftPen lineWidth=\u0026#34;0.5\u0026#34; lineColor=\u0026#34;#000000\u0026#34;/\u0026gt; 19 \u0026lt;bottomPen lineWidth=\u0026#34;0.5\u0026#34; lineColor=\u0026#34;#000000\u0026#34;/\u0026gt; 20 \u0026lt;rightPen lineWidth=\u0026#34;0.5\u0026#34; lineColor=\u0026#34;#000000\u0026#34;/\u0026gt; 21 \u0026lt;/box\u0026gt; 22 \u0026lt;/style\u0026gt; 23 \u0026lt;style name=\u0026#34;Table_TD\u0026#34; mode=\u0026#34;Opaque\u0026#34; backcolor=\u0026#34;#FFFFFF\u0026#34;\u0026gt; 24 \u0026lt;box\u0026gt; 25 \u0026lt;pen lineWidth=\u0026#34;0.5\u0026#34; lineColor=\u0026#34;#000000\u0026#34;/\u0026gt; 26 \u0026lt;topPen lineWidth=\u0026#34;0.5\u0026#34; lineColor=\u0026#34;#000000\u0026#34;/\u0026gt; 27 \u0026lt;leftPen lineWidth=\u0026#34;0.5\u0026#34; lineColor=\u0026#34;#000000\u0026#34;/\u0026gt; 28 \u0026lt;bottomPen lineWidth=\u0026#34;0.5\u0026#34; lineColor=\u0026#34;#000000\u0026#34;/\u0026gt; 29 \u0026lt;rightPen lineWidth=\u0026#34;0.5\u0026#34; lineColor=\u0026#34;#000000\u0026#34;/\u0026gt; 30 \u0026lt;/box\u0026gt; 31 \u0026lt;/style\u0026gt; 32 \u0026lt;subDataset 
name=\u0026#34;employeeDataSet\u0026#34; uuid=\u0026#34;9651295c-429d-420d-9e5b-42055e73efea\u0026#34;\u0026gt; 33 \u0026lt;property name=\u0026#34;com.jaspersoft.studio.data.defaultdataadapter\u0026#34; value=\u0026#34;One Empty Record\u0026#34;/\u0026gt; 34 \u0026lt;queryString\u0026gt; 35 \u0026lt;![CDATA[]]\u0026gt; 36 \u0026lt;/queryString\u0026gt; 37 \u0026lt;field name=\u0026#34;id\u0026#34; class=\u0026#34;java.lang.Integer\u0026#34;/\u0026gt; 38 \u0026lt;field name=\u0026#34;name\u0026#34; class=\u0026#34;java.lang.String\u0026#34;/\u0026gt; 39 \u0026lt;field name=\u0026#34;salary\u0026#34; class=\u0026#34;java.lang.Double\u0026#34;/\u0026gt; 40 \u0026lt;/subDataset\u0026gt; 41 \u0026lt;parameter name=\u0026#34;title\u0026#34; class=\u0026#34;java.lang.String\u0026#34;/\u0026gt; 42 \u0026lt;parameter name=\u0026#34;employeeDataSource\u0026#34; class=\u0026#34;net.sf.jasperreports.engine.data.JRBeanCollectionDataSource\u0026#34;/\u0026gt; 43 \u0026lt;queryString\u0026gt; 44 \u0026lt;![CDATA[]]\u0026gt; 45 \u0026lt;/queryString\u0026gt; 46 \u0026lt;background\u0026gt; 47 \u0026lt;band splitType=\u0026#34;Stretch\u0026#34;/\u0026gt; 48 \u0026lt;/background\u0026gt; 49 \u0026lt;title\u0026gt; 50 \u0026lt;band height=\u0026#34;79\u0026#34; splitType=\u0026#34;Stretch\u0026#34;\u0026gt; 51 \u0026lt;textField\u0026gt; 52 \u0026lt;reportElement x=\u0026#34;250\u0026#34; y=\u0026#34;20\u0026#34; width=\u0026#34;100\u0026#34; height=\u0026#34;30\u0026#34; uuid=\u0026#34;2f6bf147-59e5-4468-84b4-bf89a58646ef\u0026#34;/\u0026gt; 53 \u0026lt;textElement\u0026gt; 54 \u0026lt;font size=\u0026#34;12\u0026#34;/\u0026gt; 55 \u0026lt;/textElement\u0026gt; 56 \u0026lt;textFieldExpression\u0026gt;\u0026lt;![CDATA[$P{title}]]\u0026gt;\u0026lt;/textFieldExpression\u0026gt; 57 \u0026lt;/textField\u0026gt; 58 \u0026lt;/band\u0026gt; 59 \u0026lt;/title\u0026gt; 60 \u0026lt;pageHeader\u0026gt; 61 \u0026lt;band height=\u0026#34;35\u0026#34; 
splitType=\u0026#34;Stretch\u0026#34;/\u0026gt; 62 \u0026lt;/pageHeader\u0026gt; 63 \u0026lt;columnHeader\u0026gt; 64 \u0026lt;band height=\u0026#34;61\u0026#34; splitType=\u0026#34;Stretch\u0026#34;/\u0026gt; 65 \u0026lt;/columnHeader\u0026gt; 66 \u0026lt;detail\u0026gt; 67 \u0026lt;band height=\u0026#34;246\u0026#34; splitType=\u0026#34;Stretch\u0026#34;\u0026gt; 68 \u0026lt;componentElement\u0026gt; 69 \u0026lt;reportElement x=\u0026#34;180\u0026#34; y=\u0026#34;30\u0026#34; width=\u0026#34;200\u0026#34; height=\u0026#34;200\u0026#34; uuid=\u0026#34;63d288f4-369c-494f-85b0-f0108b22765e\u0026#34;\u0026gt; 70 \u0026lt;property name=\u0026#34;com.jaspersoft.studio.layout\u0026#34; value=\u0026#34;com.jaspersoft.studio.editor.layout.VerticalRowLayout\u0026#34;/\u0026gt; 71 \u0026lt;property name=\u0026#34;com.jaspersoft.studio.table.style.table_header\u0026#34; value=\u0026#34;Table_TH\u0026#34;/\u0026gt; 72 \u0026lt;property name=\u0026#34;com.jaspersoft.studio.table.style.column_header\u0026#34; value=\u0026#34;Table_CH\u0026#34;/\u0026gt; 73 \u0026lt;property name=\u0026#34;com.jaspersoft.studio.table.style.detail\u0026#34; value=\u0026#34;Table_TD\u0026#34;/\u0026gt; 74 \u0026lt;/reportElement\u0026gt; 75 \u0026lt;jr:table xmlns:jr=\u0026#34;http://jasperreports.sourceforge.net/jasperreports/components\u0026#34; xsi:schemaLocation=\u0026#34;http://jasperreports.sourceforge.net/jasperreports/components http://jasperreports.sourceforge.net/xsd/components.xsd\u0026#34;\u0026gt; 76 \u0026lt;datasetRun subDataset=\u0026#34;employeeDataSet\u0026#34; uuid=\u0026#34;6fe4bdfe-9dd2-4d5e-b94b-c951044cc1cf\u0026#34;\u0026gt; 77 \u0026lt;dataSourceExpression\u0026gt;\u0026lt;![CDATA[$P{employeeDataSource}]]\u0026gt;\u0026lt;/dataSourceExpression\u0026gt; 78 \u0026lt;/datasetRun\u0026gt; 79 \u0026lt;jr:column width=\u0026#34;66\u0026#34; uuid=\u0026#34;4ba6dc06-71ed-4eac-925f-3940726ecfa8\u0026#34;\u0026gt; 80 \u0026lt;jr:tableHeader style=\u0026#34;Table_TH\u0026#34; 
height=\u0026#34;30\u0026#34;/\u0026gt; 81 \u0026lt;jr:tableFooter style=\u0026#34;Table_TH\u0026#34; height=\u0026#34;30\u0026#34;/\u0026gt; 82 \u0026lt;jr:columnHeader style=\u0026#34;Table_CH\u0026#34; height=\u0026#34;30\u0026#34;\u0026gt; 83 \u0026lt;staticText\u0026gt; 84 \u0026lt;reportElement x=\u0026#34;0\u0026#34; y=\u0026#34;0\u0026#34; width=\u0026#34;66\u0026#34; height=\u0026#34;30\u0026#34; uuid=\u0026#34;2fa1464e-80ba-4599-b563-c9f922460fc6\u0026#34;/\u0026gt; 85 \u0026lt;text\u0026gt;\u0026lt;![CDATA[id]]\u0026gt;\u0026lt;/text\u0026gt; 86 \u0026lt;/staticText\u0026gt; 87 \u0026lt;/jr:columnHeader\u0026gt; 88 \u0026lt;jr:columnFooter style=\u0026#34;Table_CH\u0026#34; height=\u0026#34;30\u0026#34;/\u0026gt; 89 \u0026lt;jr:detailCell style=\u0026#34;Table_TD\u0026#34; height=\u0026#34;30\u0026#34;\u0026gt; 90 \u0026lt;textField\u0026gt; 91 \u0026lt;reportElement x=\u0026#34;0\u0026#34; y=\u0026#34;0\u0026#34; width=\u0026#34;66\u0026#34; height=\u0026#34;30\u0026#34; uuid=\u0026#34;fcae93e6-a2e8-44d2-b05a-18cf222b2580\u0026#34;/\u0026gt; 92 \u0026lt;textFieldExpression\u0026gt;\u0026lt;![CDATA[$F{id}]]\u0026gt;\u0026lt;/textFieldExpression\u0026gt; 93 \u0026lt;/textField\u0026gt; 94 \u0026lt;/jr:detailCell\u0026gt; 95 \u0026lt;/jr:column\u0026gt; 96 \u0026lt;jr:column width=\u0026#34;66\u0026#34; uuid=\u0026#34;a9abc185-068b-44b0-ba3e-c52793b91a90\u0026#34;\u0026gt; 97 \u0026lt;jr:tableHeader style=\u0026#34;Table_TH\u0026#34; height=\u0026#34;30\u0026#34;/\u0026gt; 98 \u0026lt;jr:tableFooter style=\u0026#34;Table_TH\u0026#34; height=\u0026#34;30\u0026#34;/\u0026gt; 99 \u0026lt;jr:columnHeader style=\u0026#34;Table_CH\u0026#34; height=\u0026#34;30\u0026#34;\u0026gt; 100 \u0026lt;staticText\u0026gt; 101 \u0026lt;reportElement x=\u0026#34;0\u0026#34; y=\u0026#34;0\u0026#34; width=\u0026#34;66\u0026#34; height=\u0026#34;30\u0026#34; uuid=\u0026#34;02aa9397-a647-477d-ae35-d87a6d2c7bc1\u0026#34;/\u0026gt; 102 
\u0026lt;text\u0026gt;\u0026lt;![CDATA[name]]\u0026gt;\u0026lt;/text\u0026gt; 103 \u0026lt;/staticText\u0026gt; 104 \u0026lt;/jr:columnHeader\u0026gt; 105 \u0026lt;jr:columnFooter style=\u0026#34;Table_CH\u0026#34; height=\u0026#34;30\u0026#34;/\u0026gt; 106 \u0026lt;jr:detailCell style=\u0026#34;Table_TD\u0026#34; height=\u0026#34;30\u0026#34;\u0026gt; 107 \u0026lt;textField\u0026gt; 108 \u0026lt;reportElement x=\u0026#34;0\u0026#34; y=\u0026#34;0\u0026#34; width=\u0026#34;66\u0026#34; height=\u0026#34;30\u0026#34; uuid=\u0026#34;f13bc7e1-3206-4e65-9377-9e488acec741\u0026#34;/\u0026gt; 109 \u0026lt;textFieldExpression\u0026gt;\u0026lt;![CDATA[$F{name}]]\u0026gt;\u0026lt;/textFieldExpression\u0026gt; 110 \u0026lt;/textField\u0026gt; 111 \u0026lt;/jr:detailCell\u0026gt; 112 \u0026lt;/jr:column\u0026gt; 113 \u0026lt;jr:column width=\u0026#34;66\u0026#34; uuid=\u0026#34;d0e58992-65cf-48e0-a03a-3fc71f88c1da\u0026#34;\u0026gt; 114 \u0026lt;jr:tableHeader style=\u0026#34;Table_TH\u0026#34; height=\u0026#34;30\u0026#34;/\u0026gt; 115 \u0026lt;jr:tableFooter style=\u0026#34;Table_TH\u0026#34; height=\u0026#34;30\u0026#34;/\u0026gt; 116 \u0026lt;jr:columnHeader style=\u0026#34;Table_CH\u0026#34; height=\u0026#34;30\u0026#34;\u0026gt; 117 \u0026lt;staticText\u0026gt; 118 \u0026lt;reportElement x=\u0026#34;0\u0026#34; y=\u0026#34;0\u0026#34; width=\u0026#34;66\u0026#34; height=\u0026#34;30\u0026#34; uuid=\u0026#34;cb76f4a3-45a1-4b8c-9e41-e4e586c79496\u0026#34;/\u0026gt; 119 \u0026lt;text\u0026gt;\u0026lt;![CDATA[salary]]\u0026gt;\u0026lt;/text\u0026gt; 120 \u0026lt;/staticText\u0026gt; 121 \u0026lt;/jr:columnHeader\u0026gt; 122 \u0026lt;jr:columnFooter style=\u0026#34;Table_CH\u0026#34; height=\u0026#34;30\u0026#34;/\u0026gt; 123 \u0026lt;jr:detailCell style=\u0026#34;Table_TD\u0026#34; height=\u0026#34;30\u0026#34;\u0026gt; 124 \u0026lt;textField\u0026gt; 125 \u0026lt;reportElement x=\u0026#34;0\u0026#34; y=\u0026#34;0\u0026#34; width=\u0026#34;66\u0026#34; 
height=\u0026#34;30\u0026#34; uuid=\u0026#34;76129bc3-7141-42f8-9059-3fb60b79ec4d\u0026#34;/\u0026gt; 126 \u0026lt;textFieldExpression\u0026gt;\u0026lt;![CDATA[$F{salary}]]\u0026gt;\u0026lt;/textFieldExpression\u0026gt; 127 \u0026lt;/textField\u0026gt; 128 \u0026lt;/jr:detailCell\u0026gt; 129 \u0026lt;/jr:column\u0026gt; 130 \u0026lt;/jr:table\u0026gt; 131 \u0026lt;/componentElement\u0026gt; 132 \u0026lt;/band\u0026gt; 133 \u0026lt;/detail\u0026gt; 134 \u0026lt;columnFooter\u0026gt; 135 \u0026lt;band height=\u0026#34;45\u0026#34; splitType=\u0026#34;Stretch\u0026#34;/\u0026gt; 136 \u0026lt;/columnFooter\u0026gt; 137 \u0026lt;pageFooter\u0026gt; 138 \u0026lt;band height=\u0026#34;54\u0026#34; splitType=\u0026#34;Stretch\u0026#34;/\u0026gt; 139 \u0026lt;/pageFooter\u0026gt; 140 \u0026lt;summary\u0026gt; 141 \u0026lt;band height=\u0026#34;42\u0026#34; splitType=\u0026#34;Stretch\u0026#34;/\u0026gt; 142 \u0026lt;/summary\u0026gt; 143\u0026lt;/jasperReport\u0026gt; Run the project to generate the EmployeeReports.pdf file.\nSetup 1# Project 70 2 3Jasper Report with Spring 4 5[https://gitorko.github.io/jasper-reports-spring/](https://gitorko.github.io/jasper-reports-spring/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14``` 15 16### Dev 17 18To generate the pdf report. 
19 20```bash 21./gradlew clean build 22./gradlew bootRun 23``` To create the jasper report template file you can use jasper studio\nReferences https://community.jaspersoft.com/\n","link":"https://gitorko.github.io/post/jasper-reports-spring/","section":"post","tags":["jasper-report"],"title":"Jasper Reports with Spring"},{"body":"","link":"https://gitorko.github.io/tags/jasper-report/","section":"tags","tags":null,"title":"Jasper-Report"},{"body":"","link":"https://gitorko.github.io/categories/reports/","section":"categories","tags":null,"title":"Reports"},{"body":"","link":"https://gitorko.github.io/tags/freemarker/","section":"tags","tags":null,"title":"Freemarker"},{"body":"HTML reports generated with freemarker templates\nGithub: https://github.com/gitorko/project69\nFreemarker We will generate a single html file report using freemarker template and provide a rest url to download the report.\nCode 1package com.demo; 2 3import java.io.ByteArrayInputStream; 4import java.io.StringWriter; 5import java.util.Date; 6import java.util.HashMap; 7import java.util.List; 8import java.util.Map; 9import java.util.Random; 10import java.util.stream.Collectors; 11import java.util.stream.IntStream; 12 13import freemarker.template.Configuration; 14import freemarker.template.Template; 15import lombok.extern.slf4j.Slf4j; 16import org.springframework.core.io.InputStreamResource; 17import org.springframework.http.HttpHeaders; 18import org.springframework.http.MediaType; 19import org.springframework.http.ResponseEntity; 20import org.springframework.web.bind.annotation.GetMapping; 21import org.springframework.web.bind.annotation.RestController; 22 23@RestController 24@Slf4j 25public class HomeController { 26 27 @GetMapping(\u0026#34;/report\u0026#34;) 28 public ResponseEntity\u0026lt;InputStreamResource\u0026gt; getReport() throws Exception { 29 log.info(\u0026#34;Generating report!\u0026#34;); 30 String htmlReport = this.generateHtmlReport(); 31 ByteArrayInputStream bis = new 
ByteArrayInputStream(htmlReport.getBytes()); 32 return ResponseEntity.ok() 33 .contentType(MediaType.APPLICATION_OCTET_STREAM) 34 .header(HttpHeaders.CONTENT_DISPOSITION, \u0026#34;attachment; filename=\\\u0026#34;myreport.htm\\\u0026#34;\u0026#34;) 35 .body(new InputStreamResource(bis)); 36 } 37 38 private String generateHtmlReport() throws Exception { 39 Configuration cfg = new Configuration(Configuration.VERSION_2_3_30); 40 cfg.setClassForTemplateLoading(this.getClass(), \u0026#34;/\u0026#34;); 41 cfg.setDefaultEncoding(\u0026#34;UTF-8\u0026#34;); 42 Template template = cfg.getTemplate(\u0026#34;templates/my-report.ftl\u0026#34;); 43 List\u0026lt;Employee\u0026gt; employees = getEmployeeData(); 44 Map\u0026lt;String, Object\u0026gt; templateData = new HashMap\u0026lt;\u0026gt;(); 45 templateData.put(\u0026#34;reportTitle\u0026#34;, \u0026#34;Company Employee Report\u0026#34;); 46 templateData.put(\u0026#34;employees\u0026#34;, employees); 47 StringWriter out = new StringWriter(); 48 template.process(templateData, out); 49 return out.toString(); 50 } 51 52 private List\u0026lt;Employee\u0026gt; getEmployeeData() { 53 //Sample Data 54 Random random = new Random(); 55 return IntStream.range(0, 150) 56 .mapToObj(i -\u0026gt; Employee.builder() 57 .name(\u0026#34;Name \u0026#34; + i) 58 .age(random.nextInt(65 - 20) + 20) 59 .dob(new Date()) 60 .salary(40000 + (100000 - 40000) * random.nextDouble()) 61 .build()) 62 .collect(Collectors.toList()); 63 } 64} 1\u0026lt;html\u0026gt; 2 3\u0026lt;head\u0026gt; 4 \u0026lt;title\u0026gt;Employee Report\u0026lt;/title\u0026gt; 5 \u0026lt;style\u0026gt; 6 table { 7 font-family: arial, sans-serif; 8 border-collapse: collapse; 9 width: 100%; 10 } 11 12 td, 13 th { 14 border: 1px solid #DDDDDD; 15 text-align: left; 16 padding: 8px; 17 } 18 19 th { 20 background-color: #CCCCCC; 21 } 22 23 p.yellow { 24 color: #FFDC0B; 25 font-weight: bold; 26 } 27 28 p.green { 29 color: #2F8400; 30 font-weight: bold; 31 } 32 \u0026lt;/style\u0026gt; 
33\u0026lt;/head\u0026gt; 34 35\u0026lt;body\u0026gt; 36\u0026lt;h3\u0026gt;${(reportTitle)!\u0026#34;Default Title\u0026#34;}\u0026lt;/h3\u0026gt; 37\u0026lt;br/\u0026gt; 38 39\u0026lt;h4\u0026gt;Employee Details\u0026lt;/h4\u0026gt; 40\u0026lt;table\u0026gt; 41 \u0026lt;tr\u0026gt; 42 \u0026lt;th\u0026gt;Id\u0026lt;/th\u0026gt; 43 \u0026lt;th\u0026gt;Name\u0026lt;/th\u0026gt; 44 \u0026lt;th\u0026gt;Age\u0026lt;/th\u0026gt; 45 \u0026lt;th\u0026gt;Dob\u0026lt;/th\u0026gt; 46 \u0026lt;th\u0026gt;Salary\u0026lt;/th\u0026gt; 47 \u0026lt;/tr\u0026gt; 48 \u0026lt;#assign empCounter=1\u0026gt; 49 \u0026lt;#list employees as empObj\u0026gt; 50 \u0026lt;tr\u0026gt; 51 \u0026lt;td\u0026gt;${empCounter}\u0026lt;/td\u0026gt; 52 \u0026lt;td\u0026gt; 53 \u0026lt;a href=\u0026#34;#\u0026#34;\u0026gt;${empObj.name}\u0026lt;/a\u0026gt; 54 \u0026lt;/td\u0026gt; 55 \u0026lt;td\u0026gt; 56 ${empObj.age} 57 \u0026lt;/td\u0026gt; 58 \u0026lt;td\u0026gt; 59 ${empObj.dob?date} 60 \u0026lt;/td\u0026gt; 61 \u0026lt;td\u0026gt; 62 \u0026lt;#if empObj.salary gt 50000\u0026gt; 63 \u0026lt;p class=\u0026#34;green\u0026#34;\u0026gt; 64 ${empObj.salary} 65 \u0026lt;/p\u0026gt; 66 \u0026lt;#else\u0026gt; 67 \u0026lt;p class=\u0026#34;yellow\u0026#34;\u0026gt; 68 ${empObj.salary} 69 \u0026lt;/p\u0026gt; 70 \u0026lt;/#if\u0026gt; 71 \u0026lt;/td\u0026gt; 72 \u0026lt;/tr\u0026gt; 73 \u0026lt;#assign empCounter=empCounter+1\u0026gt; 74 \u0026lt;/#list\u0026gt; 75\u0026lt;/table\u0026gt; 76\u0026lt;h2\u0026gt;Total Employees: ${employees?size}\u0026lt;/h2\u0026gt; 77\u0026lt;/body\u0026gt; 78\u0026lt;/html\u0026gt; Setup 1# Project 69 2 3HTML reports with freemarker 4 5[https://gitorko.github.io/freemarker-reports/](https://gitorko.github.io/freemarker-reports/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14``` 15 16### Dev 17 18To run the backend in dev mode. 
19 20```bash 21./gradlew clean build 22./gradlew bootRun 23``` References https://freemarker.apache.org/\n","link":"https://gitorko.github.io/post/freemarker-reports/","section":"post","tags":["freemarker"],"title":"Freemarker Reports"},{"body":"","link":"https://gitorko.github.io/tags/design-pattern/","section":"tags","tags":null,"title":"Design-Pattern"},{"body":"Scatter Gather enterprise integration pattern is used for scenarios such as \u0026quot;best quote\u0026quot;, where we need to request information from several suppliers and decide which one provides us with the best price for the requested item.\nGithub: https://github.com/gitorko/project01\nScatter Gather Pattern So we have a book product and we need to fetch the price from various sources and at max we can wait for 3 seconds. You could use a Thread.sleep or Threads join() method but then if the tasks complete before 3 seconds the tasks will still wait for 3 seconds before returning.\nCode We can use a CountDownLatch to wait for the prices to be fetched. 
It will wait only for 3 seconds and return the prices fetched.\n1package com.demo.basics.designpatterns._24_scattergather.latch; 2 3import java.util.Map; 4import java.util.concurrent.ConcurrentHashMap; 5import java.util.concurrent.CountDownLatch; 6import java.util.concurrent.ExecutorService; 7import java.util.concurrent.Executors; 8import java.util.concurrent.TimeUnit; 9 10import lombok.AllArgsConstructor; 11import lombok.SneakyThrows; 12import org.junit.jupiter.api.Test; 13 14public class ScatterGatherLatchTest { 15 16 ExecutorService threadPool = Executors.newCachedThreadPool(); 17 18 @Test 19 public void test() { 20 Map\u0026lt;String, Float\u0026gt; book1Prices = new ScatterGatherLatchTest().getPrices(\u0026#34;book1\u0026#34;); 21 System.out.println(book1Prices); 22 } 23 24 @SneakyThrows 25 private Map\u0026lt;String, Float\u0026gt; getPrices(String productId) { 26 Map\u0026lt;String, Float\u0026gt; prices = new ConcurrentHashMap\u0026lt;\u0026gt;(); 27 CountDownLatch latch = new CountDownLatch(3); 28 threadPool.submit(new FetchData(\u0026#34;http://amazon\u0026#34;, productId, prices, latch)); 29 threadPool.submit(new FetchData(\u0026#34;http://ebay\u0026#34;, productId, prices, latch)); 30 threadPool.submit(new FetchData(\u0026#34;http://flipkart\u0026#34;, productId, prices, latch)); 31 latch.await(3, TimeUnit.SECONDS); 32 threadPool.shutdown(); 33 return prices; 34 } 35 36 @AllArgsConstructor 37 class FetchData implements Runnable { 38 39 String url; 40 String productId; 41 Map\u0026lt;String, Float\u0026gt; prices; 42 CountDownLatch latch; 43 44 @SneakyThrows 45 @Override 46 public void run() { 47 if (url.contains(\u0026#34;amazon\u0026#34;)) { 48 //http fetch from amazon 49 System.out.println(\u0026#34;Fetching price from amazon!\u0026#34;); 50 TimeUnit.SECONDS.sleep(2); 51 prices.put(\u0026#34;amazon\u0026#34;, 2.35f); 52 latch.countDown(); 53 } 54 55 if (url.contains(\u0026#34;ebay\u0026#34;)) { 56 System.out.println(\u0026#34;Fetching price from 
ebay!\u0026#34;); 57 //http fetch from ebay 58 TimeUnit.SECONDS.sleep(4); 59 prices.put(\u0026#34;ebay\u0026#34;, 2.30f); 60 latch.countDown(); 61 } 62 63 if (url.contains(\u0026#34;flipkart\u0026#34;)) { 64 System.out.println(\u0026#34;Fetching price from flipkart!\u0026#34;); 65 //http fetch from flipkart 66 TimeUnit.SECONDS.sleep(1); 67 prices.put(\u0026#34;flipkart\u0026#34;, 2.10f); 68 latch.countDown(); 69 } 70 } 71 } 72} 73 We can also use the invokeAll method\n1package com.demo.basics.designpatterns._24_scattergather.invoke; 2 3import java.util.ArrayList; 4import java.util.List; 5import java.util.Map; 6import java.util.concurrent.Callable; 7import java.util.concurrent.ConcurrentHashMap; 8import java.util.concurrent.ExecutorService; 9import java.util.concurrent.Executors; 10import java.util.concurrent.TimeUnit; 11 12import lombok.AllArgsConstructor; 13import lombok.SneakyThrows; 14import org.junit.jupiter.api.Test; 15 16public class ScatterGatherInvokeTest { 17 ExecutorService threadPool = Executors.newCachedThreadPool(); 18 19 @Test 20 public void test() { 21 Map\u0026lt;String, Float\u0026gt; book1Prices = new ScatterGatherInvokeTest().getPrices(\u0026#34;book1\u0026#34;); 22 System.out.println(book1Prices); 23 } 24 25 @SneakyThrows 26 private Map\u0026lt;String, Float\u0026gt; getPrices(String productId) { 27 Map\u0026lt;String, Float\u0026gt; prices = new ConcurrentHashMap\u0026lt;\u0026gt;(); 28 List\u0026lt;Callable\u0026lt;Void\u0026gt;\u0026gt; tasks = new ArrayList\u0026lt;\u0026gt;(); 29 30 tasks.add(new FetchData(\u0026#34;http://amazon\u0026#34;, productId, prices)); 31 tasks.add(new FetchData(\u0026#34;http://ebay\u0026#34;, productId, prices)); 32 tasks.add(new FetchData(\u0026#34;http://flipkart\u0026#34;, productId, prices)); 33 threadPool.invokeAll(tasks, 3, TimeUnit.SECONDS); 34 threadPool.shutdown(); 35 return prices; 36 } 37 38 @AllArgsConstructor 39 class FetchData implements Callable\u0026lt;Void\u0026gt; { 40 41 String url; 42 String 
productId; 43 Map\u0026lt;String, Float\u0026gt; prices; 44 45 @Override 46 @SneakyThrows 47 public Void call() throws Exception { 48 if (url.contains(\u0026#34;amazon\u0026#34;)) { 49 //http fetch from amazon 50 System.out.println(\u0026#34;Fetching price from amazon!\u0026#34;); 51 TimeUnit.SECONDS.sleep(2); 52 prices.put(\u0026#34;amazon\u0026#34;, 2.35f); 53 } 54 55 if (url.contains(\u0026#34;ebay\u0026#34;)) { 56 System.out.println(\u0026#34;Fetching price from ebay!\u0026#34;); 57 //http fetch from ebay 58 TimeUnit.SECONDS.sleep(4); 59 prices.put(\u0026#34;ebay\u0026#34;, 2.30f); 60 } 61 62 if (url.contains(\u0026#34;flipkart\u0026#34;)) { 63 System.out.println(\u0026#34;Fetching price from flipkart!\u0026#34;); 64 //http fetch from flipkart 65 TimeUnit.SECONDS.sleep(1); 66 prices.put(\u0026#34;flipkart\u0026#34;, 2.10f); 67 } 68 return null; 69 } 70 } 71} 72 73 We can also use the CompletableFuture.\n1package com.demo.basics.designpatterns._24_scattergather.completable; 2 3import java.util.Map; 4import java.util.concurrent.CompletableFuture; 5import java.util.concurrent.ConcurrentHashMap; 6import java.util.concurrent.ExecutorService; 7import java.util.concurrent.Executors; 8import java.util.concurrent.TimeUnit; 9import java.util.concurrent.TimeoutException; 10 11import lombok.AllArgsConstructor; 12import lombok.SneakyThrows; 13import org.junit.jupiter.api.Test; 14 15public class ScatterGatherCompletableTest { 16 ExecutorService threadPool = Executors.newCachedThreadPool(); 17 18 @Test 19 public void test() { 20 Map\u0026lt;String, Float\u0026gt; book1Prices = new ScatterGatherCompletableTest().getPrices(\u0026#34;book1\u0026#34;); 21 System.out.println(book1Prices); 22 } 23 24 @SneakyThrows 25 private Map\u0026lt;String, Float\u0026gt; getPrices(String productId) { 26 Map\u0026lt;String, Float\u0026gt; prices = new ConcurrentHashMap\u0026lt;\u0026gt;(); 27 28 CompletableFuture\u0026lt;Void\u0026gt; task1 = CompletableFuture.runAsync(new 
FetchData(\u0026#34;http://amazon\u0026#34;, productId, prices)); 29 CompletableFuture\u0026lt;Void\u0026gt; task2 = CompletableFuture.runAsync(new FetchData(\u0026#34;http://ebay\u0026#34;, productId, prices)); 30 CompletableFuture\u0026lt;Void\u0026gt; task3 = CompletableFuture.runAsync(new FetchData(\u0026#34;http://flipkart\u0026#34;, productId, prices)); 31 32 CompletableFuture\u0026lt;Void\u0026gt; allTasks = CompletableFuture.allOf(task1,task2,task3); 33 try { 34 allTasks.get(3, TimeUnit.SECONDS); 35 } catch (TimeoutException ex) { 36 //Do Nothing! 37 } 38 return prices; 39 } 40 41 @AllArgsConstructor 42 class FetchData implements Runnable { 43 44 String url; 45 String productId; 46 Map\u0026lt;String, Float\u0026gt; prices; 47 48 @Override 49 @SneakyThrows 50 public void run() { 51 if (url.contains(\u0026#34;amazon\u0026#34;)) { 52 //http fetch from amazon 53 System.out.println(\u0026#34;Fetching price from amazon!\u0026#34;); 54 TimeUnit.SECONDS.sleep(2); 55 prices.put(\u0026#34;amazon\u0026#34;, 2.35f); 56 } 57 58 if (url.contains(\u0026#34;ebay\u0026#34;)) { 59 System.out.println(\u0026#34;Fetching price from ebay!\u0026#34;); 60 //http fetch from ebay 61 TimeUnit.SECONDS.sleep(4); 62 prices.put(\u0026#34;ebay\u0026#34;, 2.30f); 63 } 64 65 if (url.contains(\u0026#34;flipkart\u0026#34;)) { 66 System.out.println(\u0026#34;Fetching price from flipkart!\u0026#34;); 67 //http fetch from flipkart 68 TimeUnit.SECONDS.sleep(1); 69 prices.put(\u0026#34;flipkart\u0026#34;, 2.10f); 70 } 71 } 72 } 73} 74 Result\n1{amazon=2.35, flipkart=2.1} Setup 1# Project 01 2 3Data Structure \u0026amp; Algorithms \u0026amp; Design Patterns 4 5[https://gitorko.github.io/grokking-the-coding-interview/](https://gitorko.github.io/grokking-the-coding-interview/) 6[https://gitorko.github.io/design-patterns/](https://gitorko.github.io/design-patterns/) 7 8### Version 9 10Check version 11 12```bash 13$java --version 14openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 15``` 16 
17### Online code editor 18 19https://rustpad.io/ 20 21https://collabedit.com/ 22 23https://app.coderpad.io/ 24 25https://codeshare.io/ 26 27### Topic 28 2901. Number 3002. String 3103. Map \u0026amp; Set 3204. Heap 3305. Sliding window / Two pointer 3406. Matrix / Grid 3507. Backtracking 3608. Pre-Sum 3709. DP 3810. Link List 3911. Binary Tree / BST 4012. Interval 4113. Binary Search 4214. Topological Sort 4315. Stack \u0026amp; Monotonic Stack \u0026amp; Queue 4416. Graphs 4517. Thread 4618. Greedy 4719. Segment Tree 4820. Prefix Tree / Trie 4921. Cyclic sort 5022. Bit Manipulation 5125. Generic ","link":"https://gitorko.github.io/post/scatter-gather-pattern/","section":"post","tags":["design-pattern","scatter-gather-pattern"],"title":"Scatter Gather Pattern"},{"body":"","link":"https://gitorko.github.io/tags/scatter-gather-pattern/","section":"tags","tags":null,"title":"Scatter-Gather-Pattern"},{"body":"Spring Boot QueryDSL lets you query the database using domain specific language similar to SQL.\nGithub: https://github.com/gitorko/project75\nSpring QueryDSL Let's say you used Spring Data to query the db by using spring naming convention. If your table has 100's of column and you have to query by any column you can't write 100 access functions. 
This is where query dsl comes into play.\nCode 1package com.demo.project75; 2 3import java.util.stream.IntStream; 4 5import com.demo.project75.domain.Customer; 6import com.demo.project75.repository.CustomerRepository; 7import org.springframework.boot.CommandLineRunner; 8import org.springframework.boot.SpringApplication; 9import org.springframework.boot.autoconfigure.SpringBootApplication; 10 11@SpringBootApplication 12public class Main { 13 14 public static void main(String[] args) { 15 SpringApplication.run(Main.class, args); 16 } 17 18 public CommandLineRunner onStart(CustomerRepository customerRepository) { 19 return (args) -\u0026gt; { 20 //Insert test data 21 IntStream.range(0, 5).forEach(i -\u0026gt; { 22 customerRepository.save(Customer.builder() 23 .firstName(\u0026#34;firstname_\u0026#34; + i) 24 .lastName(\u0026#34;lastname \u0026#34; + i) 25 .age(30) 26 .email(\u0026#34;email@email.com\u0026#34;) 27 .build()); 28 }); 29 }; 30 } 31} 32 33 34 35 It uses in memory h2 db to persist.\nSetup 1# Project 75 2 3Spring Boot - Querydsl 4 5[https://gitorko.github.io/spring-boot-querydsl/](https://gitorko.github.io/spring-boot-querydsl/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14``` 15 16### Dev 17 18To run the code. 
19 20```bash 21./gradlew clean build 22./gradlew bootRun 23``` Testing You can now search based on all the columns of the db and get the response.\nhttp://localhost:8080/users?age=30\nhttp://localhost:8080/users?firstName=firstname_0\nhttp://localhost:8080/users?firstName=firstname_0\u0026amp;age=30\nReferences Spring Query DSL : http://www.querydsl.com/static/querydsl/latest/reference/html/ch02.html\n","link":"https://gitorko.github.io/post/spring-querydsl/","section":"post","tags":["spring","spring-boot","querydsl"],"title":"Spring - QueryDSL"},{"body":"Spring application with vault integration\nGithub: https://github.com/gitorko/project76\nVault HashiCorp's vault is a tool to store and secure secrets along with tight access control. You can store tokens, passwords, certificates, API keys and other secrets. Spring Vault provides spring abstractions to vault. Sometimes you need your running application to detect the changed property value in order to provide a toggle on/off feature.\nCode Based on the spring profile the respective properties get loaded from vault.\nAfter the feature flag is changed, the new property value is detected by the application without needing a restart.\n1package com.demo.project76; 2 3import java.util.Arrays; 4 5import com.demo.project76.domain.Customer; 6import com.demo.project76.domain.MySecrets; 7import com.demo.project76.repository.CustomerRepository; 8import lombok.RequiredArgsConstructor; 9import lombok.extern.slf4j.Slf4j; 10import org.springframework.beans.factory.annotation.Value; 11import org.springframework.boot.CommandLineRunner; 12import org.springframework.boot.SpringApplication; 13import org.springframework.boot.autoconfigure.SpringBootApplication; 14import org.springframework.boot.context.properties.EnableConfigurationProperties; 15import org.springframework.context.annotation.Bean; 16import org.springframework.core.env.ConfigurableEnvironment; 17import org.springframework.vault.core.VaultKeyValueOperationsSupport; 
18import org.springframework.vault.core.VaultOperations; 19import org.springframework.vault.core.VaultSysOperations; 20import org.springframework.vault.core.VaultTemplate; 21import org.springframework.vault.core.VaultTransitOperations; 22import org.springframework.vault.support.VaultMount; 23import org.springframework.vault.support.VaultResponse; 24 25@SpringBootApplication 26@Slf4j 27@RequiredArgsConstructor 28@EnableConfigurationProperties(MySecrets.class) 29public class Main { 30 31 private final VaultTemplate vaultTemplate; 32 private final MySecrets mySecrets; 33 private final VaultOperations operations; 34 35 @Value(\u0026#34;${my-group.username}\u0026#34;) 36 private String userName; 37 38 @Value(\u0026#34;${my-group.appType}\u0026#34;) 39 private String appType; 40 41 public static void main(String[] args) { 42 SpringApplication.run(Main.class, args); 43 } 44 45 @Bean 46 public CommandLineRunner onStart(CustomerRepository customerRepository, ConfigurableEnvironment environment) { 47 return args -\u0026gt; { 48 log.info(\u0026#34;Value injected via @Value userName : {}\u0026#34;, userName); 49 log.info(\u0026#34;Value injected via @Value environment : {}\u0026#34;, environment); 50 log.info(\u0026#34;Value injected via @Value appType : {}\u0026#34;, appType); 51 log.info(\u0026#34;Value injected via class mySecrets: {}\u0026#34;, mySecrets); 52 53 //Reading directly. 
54 if (Arrays.stream(environment.getActiveProfiles()).anyMatch(t -\u0026gt; t.equals(\u0026#34;dev\u0026#34;))) { 55 VaultResponse response = vaultTemplate.opsForKeyValue(\u0026#34;secret\u0026#34;, 56 VaultKeyValueOperationsSupport.KeyValueBackend.KV_2).get(\u0026#34;myapp/dev\u0026#34;); 57 log.info(\u0026#34;Value of myKey: {} \u0026#34;, response.getData().get(\u0026#34;myKey\u0026#34;)); 58 } else { 59 VaultResponse response = vaultTemplate.opsForKeyValue(\u0026#34;secret\u0026#34;, 60 VaultKeyValueOperationsSupport.KeyValueBackend.KV_2).get(\u0026#34;myapp/prod\u0026#34;); 61 log.info(\u0026#34;Value of myKey: {} \u0026#34;, response.getData().get(\u0026#34;myKey\u0026#34;)); 62 } 63 64 //Writing new values to different path. 65 VaultTransitOperations transitOperations = vaultTemplate.opsForTransit(); 66 VaultSysOperations sysOperations = vaultTemplate.opsForSys(); 67 if (!sysOperations.getMounts().containsKey(\u0026#34;transit/\u0026#34;)) { 68 sysOperations.mount(\u0026#34;transit\u0026#34;, VaultMount.create(\u0026#34;transit\u0026#34;)); 69 transitOperations.createKey(\u0026#34;foo-key\u0026#34;); 70 } 71 72 // Encrypt a plain-text value 73 String ciphertext = transitOperations.encrypt(\u0026#34;foo-key\u0026#34;, \u0026#34;Secure message\u0026#34;); 74 log.info(\u0026#34;Encrypted value: {}\u0026#34;, ciphertext); 75 76 // Decrypt 77 String plaintext = transitOperations.decrypt(\u0026#34;foo-key\u0026#34;, ciphertext); 78 log.info(\u0026#34;Decrypted value: {}\u0026#34;, plaintext); 79 80 //Save to db, connection established via vault credentials 81 Customer customer = customerRepository.save(Customer.builder().firstName(\u0026#34;John\u0026#34;).lastName(\u0026#34;Rambo\u0026#34;).build()); 82 log.info(\u0026#34;Customer: {}\u0026#34;, customer); 83 84 }; 85 } 86} 1package com.demo.project76.controller; 2 3 4import lombok.extern.slf4j.Slf4j; 5import org.springframework.beans.factory.annotation.Value; 6import 
org.springframework.cloud.context.config.annotation.RefreshScope; 7import org.springframework.web.bind.annotation.GetMapping; 8import org.springframework.web.bind.annotation.RestController; 9 10@RestController 11@RefreshScope 12@Slf4j 13public class HomeController { 14 15 @Value(\u0026#34;${featureFlag}\u0026#34;) 16 private Boolean featureFlag; 17 18 @GetMapping(value = \u0026#34;/greet\u0026#34;) 19 public String greet() { 20 log.info(\u0026#34;featureFlag: {}\u0026#34;, featureFlag); 21 return featureFlag ? \u0026#34;Good Morning\u0026#34; : \u0026#34;Good Bye\u0026#34;; 22 } 23} 1package com.demo.project76.domain; 2 3import lombok.Data; 4import org.springframework.boot.context.properties.ConfigurationProperties; 5import org.springframework.context.annotation.Configuration; 6 7@Data 8@Configuration 9@ConfigurationProperties(\u0026#34;my-group\u0026#34;) 10public class MySecrets { 11 String username; 12 String password; 13 String dbname; 14} 1my-group: 2 dbname: ${dbname} 3 username: ${username} 4 password: ${password} 5 appType: dev 6spring: 7 application: 8 name: myapp 9 main: 10 banner-mode: \u0026#34;off\u0026#34; 11 datasource: 12 driver-class-name: org.postgresql.Driver 13 host: localhost 14 url: jdbc:postgresql://${spring.datasource.host}:5432/${my-group.dbname} 15 username: ${my-group.username} 16 password: ${my-group.password} 17 jpa: 18 show-sql: false 19 hibernate.ddl-auto: create-drop 20 properties.hibernate.temp.use_jdbc_metadata_defaults: false 21 database-platform: org.hibernate.dialect.PostgreSQLDialect 22 defer-datasource-initialization: true 23management: 24 endpoints: 25 web: 26 exposure: 27 include: refresh 1spring: 2 cloud: 3 # Configuration for a vault server running in dev mode 4 vault: 5 scheme: http 6 host: localhost 7 port: 8200 8 connection-timeout: 5000 9 read-timeout: 15000 10 authentication: TOKEN 11 token: 00000000-0000-0000-0000-000000000000 12 kv: 13 enabled=true: 14 application-name: myapp To provide a feature toggle feature you 
can use the @RefreshScope annotation and trigger a refresh using spring actuator.\nSetup 1# Project 76 2 3Spring Boot - Vault \u0026amp; Property Refresh 4 5[https://gitorko.github.io/spring-vault/](https://gitorko.github.io/spring-vault/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14 15$vault --version 16Vault v1.5.0 (\u0026#39;340cc2fa263f6cbd2861b41518da8a62c153e2e7+CHANGES\u0026#39;) 17``` 18 19### Postgres DB 20 21``` 22docker run -p 5432:5432 --name pg-container -e POSTGRES_PASSWORD=password -d postgres:9.6.10 23docker ps 24docker exec -it pg-container psql -U postgres -W postgres 25CREATE USER test WITH PASSWORD \u0026#39;test@123\u0026#39;; 26CREATE DATABASE \u0026#34;test-db\u0026#34; WITH OWNER \u0026#34;test\u0026#34; ENCODING UTF8 TEMPLATE template0; 27grant all PRIVILEGES ON DATABASE \u0026#34;test-db\u0026#34; to test; 28 29docker stop pg-container 30docker start pg-container 31``` 32 33## Vault 34 35To install vault on mac run the command, for other OS download and install vault. 
36 37```bash 38brew install vault 39``` 40 41Start the dev server 42 43```bash 44vault server -dev -log-level=INFO -dev-root-token-id=00000000-0000-0000-0000-000000000000 45``` 46 47Once vault is up, insert some values 48 49```bash 50export VAULT_ADDR=http://localhost:8200 51export VAULT_SKIP_VERIFY=true 52export VAULT_TOKEN=00000000-0000-0000-0000-000000000000 53vault kv put secret/myapp/dev username=test password=test@123 dbname=test-db myKey=foobar featureFlag=true 54vault kv put secret/myapp/prod username=test password=test@123 dbname=test-db myKey=fooprod featureFlag=true 55``` 56 57You can login to vault UI with token \u0026#39;00000000-0000-0000-0000-000000000000\u0026#39; 58 59Vault UI: [http://127.0.0.1:8200/](http://127.0.0.1:8200/) 60 61To update property value 62 63```bash 64vault kv patch secret/myapp/dev featureFlag=true 65vault kv patch secret/myapp/dev featureFlag=false 66``` 67 68### Dev 69 70To run the code. 71 72```bash 73./gradlew clean build 74./gradlew bootRun --args=\u0026#39;--spring.profiles.active=dev\u0026#39; 75./gradlew bootRun --args=\u0026#39;--spring.profiles.active=prod\u0026#39; 76``` Testing You should now see the values being fetched from vault.\nYou can now invoke greet api to see a 'Good Morning' response.\n1curl --location --request GET \u0026#39;localhost:8080/greet\u0026#39; Now lets change the feature flag to false in vault\n1vault kv patch secret/myapp/dev featureFlag=false In order for the values to be refreshed by spring context you need to make a call to actuator api\n1curl --location --request POST \u0026#39;http://localhost:8080/actuator/refresh\u0026#39; Now the values will be refreshed and invoking greet api will show 'Good Bye' response.\n1curl --location --request GET \u0026#39;localhost:8080/greet\u0026#39; Few more vault commands to try out\n1vault kv get -field=username secret/myapp/dev 2vault kv delete secret/myapp/dev 3vault kv delete secret/myapp/prod References 
Spring Cloud Sleuth is deprecated; refer to https://gitorko.github.io/post/spring-observability/\nSpring Cloud Sleuth helps you trace a request and the Zipkin server helps you trace in a distributed environment.\nGithub: https://github.com/gitorko/project72\nSpring Cloud Sleuth \u0026amp; Zipkin How do you trace \u0026amp; debug a request in a single server? Now when it is deployed in pods and scaled, how do you trace a request in a distributed environment? Spring Cloud Sleuth helps you trace a request by appending a unique trace id to the log statements. You can then publish such traces to the Zipkin server, which lets you visualize a request across a distributed environment. You can then see the latency of each request in a distributed transaction.\nInternally it has 4 modules –\nCollector – Once any component sends trace data to the Zipkin collector daemon, it is validated, stored, and indexed for lookups by the Zipkin collector. Storage – This module stores and indexes the lookup data in the backend. Cassandra, ElasticSearch and MySQL are supported. Search – This module provides a simple JSON API for finding and retrieving traces stored in the backend. The primary consumer of this API is the Web UI. Web UI – A very nice UI for viewing traces.
Code 1package com.demo.project72.service; 2 3import java.util.concurrent.TimeUnit; 4 5import brave.Span; 6import brave.Tracer; 7import lombok.RequiredArgsConstructor; 8import lombok.SneakyThrows; 9import lombok.extern.slf4j.Slf4j; 10import org.springframework.scheduling.annotation.Async; 11import org.springframework.scheduling.annotation.EnableAsync; 12import org.springframework.stereotype.Service; 13 14@Service 15@Slf4j 16@EnableAsync 17@RequiredArgsConstructor 18public class GreetService { 19 20 private final Tracer tracer; 21 22 @SneakyThrows 23 public void doSomeWorkSameSpan() { 24 TimeUnit.SECONDS.sleep(1); 25 log.info(\u0026#34;Work Span\u0026#34;); 26 } 27 28 public void doSomeWorkNewSpan() throws InterruptedException { 29 log.info(\u0026#34;Original span\u0026#34;); 30 Span newSpan = tracer.nextSpan().name(\u0026#34;newSpan\u0026#34;).start(); 31 try (Tracer.SpanInScope ws = tracer.withSpanInScope(newSpan.start())) { 32 TimeUnit.SECONDS.sleep(1); 33 log.info(\u0026#34;New Span\u0026#34;); 34 } finally { 35 newSpan.finish(); 36 } 37 log.info(\u0026#34;Original span\u0026#34;); 38 } 39 40 @Async 41 public void asyncMethod() throws InterruptedException { 42 log.info(\u0026#34;Start Async Method\u0026#34;); 43 TimeUnit.SECONDS.sleep(1); 44 log.info(\u0026#34;End Async Method\u0026#34;); 45 } 46} 1package com.demo.project72.config; 2 3import java.util.concurrent.Executor; 4 5import lombok.RequiredArgsConstructor; 6import org.springframework.beans.factory.BeanFactory; 7import org.springframework.cloud.sleuth.instrument.async.LazyTraceExecutor; 8import org.springframework.context.annotation.Configuration; 9import org.springframework.scheduling.annotation.AsyncConfigurer; 10import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; 11 12@Configuration 13@RequiredArgsConstructor 14class ThreadConfig implements AsyncConfigurer { 15 16 private final BeanFactory beanFactory; 17 18 @Override 19 public Executor getAsyncExecutor() { 20 ThreadPoolTaskExecutor 
Log in to the Zipkin UI, and wait a few seconds for the server to be up.
AMQP (Advanced Message Queuing Protocol) is an open standard wire specification for asynchronous message communication. AMQP provides a platform-neutral binary protocol standard, hence it can run on different environments \u0026amp; programming languages, unlike JMS.\nRemote procedure call (RPC) is a way of invoking a function on another computer and waiting for the result.
The call is synchronous and blocking in nature, so the client will wait for the response.\nCode Queue to send and receive messages\n1package com.demo.project78.queue; 2 3import com.demo.project78.config.AmqpConfig; 4import org.springframework.amqp.core.Queue; 5import org.springframework.amqp.core.QueueBuilder; 6import org.springframework.context.annotation.Bean; 7import org.springframework.context.annotation.Configuration; 8 9@Configuration 10public class QueueConfig { 11 12 @Bean 13 public Queue simpleQueue() { 14 return QueueBuilder.durable(AmqpConfig.SIMPLE_QUEUE).build(); 15 } 16 17} 18 19 20 21 1package com.demo.project78.queue; 2 3import com.demo.project78.config.AmqpConfig; 4import com.demo.project78.domain.Customer; 5import lombok.RequiredArgsConstructor; 6import lombok.extern.slf4j.Slf4j; 7import org.springframework.amqp.rabbit.core.RabbitTemplate; 8import org.springframework.stereotype.Component; 9 10@Component 11@Slf4j 12@RequiredArgsConstructor 13public class QueueSender { 14 15 private final RabbitTemplate rabbitTemplate; 16 17 public void send(Customer customer) { 18 rabbitTemplate.convertAndSend(AmqpConfig.SIMPLE_QUEUE, customer); 19 log.info(\u0026#34;Sent to {} : {}\u0026#34;, AmqpConfig.SIMPLE_QUEUE, customer); 20 } 21} 1package com.demo.project78.queue; 2 3import com.demo.project78.config.AmqpConfig; 4import com.demo.project78.domain.Customer; 5import lombok.extern.slf4j.Slf4j; 6import org.springframework.amqp.rabbit.annotation.RabbitListener; 7import org.springframework.stereotype.Component; 8 9@Component 10@Slf4j 11public class QueueReceiver { 12 13 @RabbitListener(queues = AmqpConfig.SIMPLE_QUEUE, containerFactory = \u0026#34;rabbitListenerContainerFactory\u0026#34;) 14 public void receive(Customer customer) { 15 log.info(\u0026#34;{} Received: {}\u0026#34;, AmqpConfig.SIMPLE_QUEUE, customer); 16 17 //Simulate a failure on processing 18 if (customer.getName().equals(\u0026#34;NO_NAME\u0026#34;)) { 19 throw new RuntimeException(\u0026#34;No 
customer name!\u0026#34;); 20 } 21 } 22} Direct exchange with routing key\n1package com.demo.project78.exchange; 2 3import static com.demo.project78.config.AmqpConfig.DIRECT_ROUTING_KEY; 4 5import com.demo.project78.config.AmqpConfig; 6import org.springframework.amqp.core.Binding; 7import org.springframework.amqp.core.BindingBuilder; 8import org.springframework.amqp.core.Exchange; 9import org.springframework.amqp.core.ExchangeBuilder; 10import org.springframework.amqp.core.Queue; 11import org.springframework.amqp.core.QueueBuilder; 12import org.springframework.context.annotation.Bean; 13import org.springframework.context.annotation.Configuration; 14 15@Configuration 16public class ExchangeConfig { 17 18 @Bean 19 public Queue directQueue() { 20 return QueueBuilder.durable(AmqpConfig.DIRECT_QUEUE).build(); 21 } 22 23 24 @Bean 25 public Exchange directExchange() { 26 /** 27 * DirectExchange - Routes messages with a routing key that exactly matches the binding key of a queue. 28 * routing logic is based on exact matches 29 */ 30 return ExchangeBuilder.directExchange(AmqpConfig.DIRECT_EXCHANGE).durable(true).build(); 31 } 32 33 @Bean 34 Binding binding(Queue directQueue, Exchange directExchange) { 35 return BindingBuilder.bind(directQueue).to(directExchange).with(DIRECT_ROUTING_KEY).noargs(); 36 } 37 38} 1package com.demo.project78.exchange; 2 3import com.demo.project78.config.AmqpConfig; 4import com.demo.project78.domain.Customer; 5import lombok.RequiredArgsConstructor; 6import lombok.extern.slf4j.Slf4j; 7import org.springframework.amqp.rabbit.core.RabbitTemplate; 8import org.springframework.stereotype.Component; 9 10@Component 11@Slf4j 12@RequiredArgsConstructor 13public class ExchangeSender { 14 15 final RabbitTemplate rabbitTemplate; 16 17 public void send(Customer customer, String routingKey) { 18 rabbitTemplate.convertAndSend(AmqpConfig.DIRECT_EXCHANGE, routingKey, customer); 19 log.info(\u0026#34;Sent to {} with Key: {}, {}\u0026#34;, AmqpConfig.DIRECT_EXCHANGE, 
routingKey, customer); 20 } 21} 1package com.demo.project78.exchange; 2 3import com.demo.project78.config.AmqpConfig; 4import com.demo.project78.domain.Customer; 5import lombok.extern.slf4j.Slf4j; 6import org.springframework.amqp.rabbit.annotation.RabbitListener; 7import org.springframework.stereotype.Component; 8 9@Component 10@Slf4j 11public class ExchangeReceiver { 12 13 @RabbitListener(queues = AmqpConfig.DIRECT_QUEUE) 14 public void receive(Customer customer) { 15 log.info(\u0026#34;{} Received {}\u0026#34;, AmqpConfig.DIRECT_QUEUE, customer); 16 } 17 18} Fanout exchange\n1package com.demo.project78.fanout; 2 3import com.demo.project78.config.AmqpConfig; 4import org.springframework.amqp.core.BindingBuilder; 5import org.springframework.amqp.core.Declarables; 6import org.springframework.amqp.core.FanoutExchange; 7import org.springframework.amqp.core.Queue; 8import org.springframework.context.annotation.Bean; 9import org.springframework.context.annotation.Configuration; 10 11import static org.springframework.amqp.core.BindingBuilder.bind; 12 13@Configuration 14public class FanoutExchangeConfig { 15 16 @Bean 17 public Declarables fanoutBindings() { 18 Queue fanoutQueue1 = new Queue(AmqpConfig.FANOUT_QUEUE_1, false); 19 Queue fanoutQueue2 = new Queue(AmqpConfig.FANOUT_QUEUE_2, false); 20 FanoutExchange fanoutExchange = new FanoutExchange(AmqpConfig.FANOUT_EXCHANGE); 21 22 return new Declarables( 23 fanoutQueue1, 24 fanoutQueue2, 25 fanoutExchange, 26 bind(fanoutQueue1).to(fanoutExchange), 27 BindingBuilder.bind(fanoutQueue2).to(fanoutExchange)); 28 } 29} 1package com.demo.project78.fanout; 2 3import com.demo.project78.config.AmqpConfig; 4import com.demo.project78.domain.Customer; 5import lombok.RequiredArgsConstructor; 6import lombok.extern.slf4j.Slf4j; 7import org.springframework.amqp.rabbit.core.RabbitTemplate; 8import org.springframework.stereotype.Component; 9 10@Component 11@Slf4j 12@RequiredArgsConstructor 13public class FanoutSender { 14 15 private final 
RabbitTemplate rabbitTemplate; 16 17 public void send(Customer customer, String routingKey) { 18 rabbitTemplate.convertAndSend(AmqpConfig.FANOUT_EXCHANGE, routingKey, customer); 19 //routing key doesnt matter 20 log.info(\u0026#34;Sent to {} with Key: {}, {}\u0026#34;, AmqpConfig.FANOUT_EXCHANGE, routingKey, customer); 21 } 22} 1package com.demo.project78.fanout; 2 3import com.demo.project78.config.AmqpConfig; 4import com.demo.project78.domain.Customer; 5import lombok.extern.slf4j.Slf4j; 6import org.springframework.amqp.rabbit.annotation.RabbitListener; 7import org.springframework.stereotype.Component; 8 9@Component 10@Slf4j 11public class FanoutReceiver { 12 13 @RabbitListener(queues = AmqpConfig.FANOUT_QUEUE_1) 14 public void receive1(Customer customer) { 15 log.info(\u0026#34;{} Received {}\u0026#34;, AmqpConfig.FANOUT_QUEUE_1, customer); 16 } 17 18 @RabbitListener(queues = AmqpConfig.FANOUT_QUEUE_2) 19 public void receive2(Customer customer) { 20 log.info(\u0026#34;{} Received {}\u0026#34;, AmqpConfig.FANOUT_QUEUE_2, customer); 21 } 22} Topic exchange\n1package com.demo.project78.topic; 2 3import com.demo.project78.config.AmqpConfig; 4import org.springframework.amqp.core.BindingBuilder; 5import org.springframework.amqp.core.Declarables; 6import org.springframework.amqp.core.ExchangeBuilder; 7import org.springframework.amqp.core.Queue; 8import org.springframework.amqp.core.QueueBuilder; 9import org.springframework.amqp.core.TopicExchange; 10import org.springframework.context.annotation.Bean; 11import org.springframework.context.annotation.Configuration; 12 13@Configuration 14public class TopicExchangeConfig { 15 16 @Bean 17 public Declarables topicBindings() { 18 Queue topicQueue1 = QueueBuilder.durable(AmqpConfig.TOPIC_QUEUE_1).build(); 19 Queue topicQueue2 = QueueBuilder.durable(AmqpConfig.TOPIC_QUEUE_2).build(); 20 21 TopicExchange topicExchange = ExchangeBuilder.topicExchange(AmqpConfig.TOPIC_EXCHANGE).durable(true).build(); 22 23 return new Declarables( 24 
topicQueue1, 25 topicQueue2, 26 topicExchange, 27 BindingBuilder 28 .bind(topicQueue1) 29 .to(topicExchange).with(\u0026#34;*.booking.*\u0026#34;), 30 BindingBuilder 31 .bind(topicQueue2) 32 .to(topicExchange).with(\u0026#34;#.reward\u0026#34;)); 33 } 34} 1package com.demo.project78.topic; 2 3import com.demo.project78.config.AmqpConfig; 4import com.demo.project78.domain.Customer; 5import lombok.RequiredArgsConstructor; 6import lombok.extern.slf4j.Slf4j; 7import org.springframework.amqp.rabbit.core.RabbitTemplate; 8import org.springframework.stereotype.Component; 9 10@Component 11@Slf4j 12@RequiredArgsConstructor 13public class TopicSender { 14 15 private final RabbitTemplate rabbitTemplate; 16 17 public void send(Customer customer, String routingKey) { 18 rabbitTemplate.convertAndSend(AmqpConfig.TOPIC_EXCHANGE, routingKey, customer); 19 log.info(\u0026#34;Sent to {} with Key: {}, {}\u0026#34;, AmqpConfig.TOPIC_EXCHANGE, routingKey, customer); 20 } 21} 1package com.demo.project78.topic; 2 3import com.demo.project78.config.AmqpConfig; 4import com.demo.project78.domain.Customer; 5import lombok.extern.slf4j.Slf4j; 6import org.springframework.amqp.rabbit.annotation.RabbitListener; 7import org.springframework.stereotype.Component; 8 9@Component 10@Slf4j 11public class TopicReceiver { 12 13 @RabbitListener(queues = AmqpConfig.TOPIC_QUEUE_1) 14 public void receive1(Customer customer) { 15 log.info(\u0026#34;[Processor1] {} Received {}\u0026#34;, AmqpConfig.TOPIC_QUEUE_1, customer); 16 } 17 18 @RabbitListener(queues = AmqpConfig.TOPIC_QUEUE_2) 19 public void receive2(Customer customer) { 20 log.info(\u0026#34;[Processor2] {} Received {}\u0026#34;, AmqpConfig.TOPIC_QUEUE_2, customer); 21 } 22} Error handling \u0026amp; Exchange creation\n1package com.demo.project78.config; 2 3import com.fasterxml.jackson.databind.ObjectMapper; 4import lombok.extern.slf4j.Slf4j; 5import org.springframework.amqp.rabbit.config.SimpleRabbitListenerContainerFactory; 6import 
org.springframework.amqp.rabbit.connection.ConnectionFactory; 7import org.springframework.amqp.rabbit.core.RabbitTemplate; 8import org.springframework.amqp.rabbit.listener.ConditionalRejectingErrorHandler; 9import org.springframework.amqp.support.converter.Jackson2JsonMessageConverter; 10import org.springframework.context.annotation.Bean; 11import org.springframework.context.annotation.Configuration; 12import org.springframework.util.ErrorHandler; 13 14@Configuration 15@Slf4j 16public class AmqpConfig { 17 18 public static final String SIMPLE_QUEUE = \u0026#34;project78.simple.queue\u0026#34;; 19 20 public static final String DIRECT_EXCHANGE = \u0026#34;project78.direct.exchange\u0026#34;; 21 public static final String DIRECT_QUEUE = \u0026#34;project78.direct.queue\u0026#34;; 22 public static final String DIRECT_ROUTING_KEY = \u0026#34;project78.direct.key\u0026#34;; 23 24 public static final String RPC_EXCHANGE = \u0026#34;project78.rpc.exchange\u0026#34;; 25 public static final String RPC_QUEUE = \u0026#34;project78.rpc.queue\u0026#34;; 26 public static final String RPC_KEY = \u0026#34;project78.rpc.key\u0026#34;; 27 28 public static final String FANOUT_QUEUE_1 = \u0026#34;project78.fanout.queue1\u0026#34;; 29 public static final String FANOUT_QUEUE_2 = \u0026#34;project78.fanout.queue2\u0026#34;; 30 public static final String FANOUT_EXCHANGE = \u0026#34;project78.fanout.exchange\u0026#34;; 31 public static final String FANOUT_KEY1 = \u0026#34;*.fan-key1.*\u0026#34;; 32 public static final String FANOUT_KEY2 = \u0026#34;*.fan-key2.*\u0026#34;; 33 34 public static final String TOPIC_QUEUE_1 = \u0026#34;project78.topic.booking\u0026#34;; 35 public static final String TOPIC_QUEUE_2 = \u0026#34;project78.topic.reward\u0026#34;; 36 public static final String TOPIC_EXCHANGE = \u0026#34;project78.topic.exchange\u0026#34;; 37 38 static final ObjectMapper objectMapper = new ObjectMapper(); 39 40 @Bean 41 public Jackson2JsonMessageConverter jsonMessageConverter() { 42 
return new Jackson2JsonMessageConverter(objectMapper); 43 } 44 45 @Bean 46 public RabbitTemplate rabbitTemplate(ConnectionFactory connectionFactory) { 47 final RabbitTemplate rabbitTemplate = new RabbitTemplate(connectionFactory); 48 rabbitTemplate.setMessageConverter(jsonMessageConverter()); 49 return rabbitTemplate; 50 } 51 52 /** 53 * Without setting the error handle the failed message getting re-queued will cause infinite loops 54 */ 55 @Bean 56 public SimpleRabbitListenerContainerFactory rabbitListenerContainerFactory(ConnectionFactory connectionFactory, 57 Jackson2JsonMessageConverter jsonMessageConverter, 58 ErrorHandler errorHandler) { 59 SimpleRabbitListenerContainerFactory factory = new SimpleRabbitListenerContainerFactory(); 60 factory.setConnectionFactory(connectionFactory); 61 factory.setMessageConverter(jsonMessageConverter); 62 factory.setErrorHandler(errorHandler); 63 //Not to re-queue 64 factory.setDefaultRequeueRejected(false); 65 return factory; 66 } 67 68 @Bean 69 public ErrorHandler errorHandler() { 70 return new ConditionalRejectingErrorHandler(new MyFatalExceptionStrategy()); 71 } 72 73 public static class MyFatalExceptionStrategy extends ConditionalRejectingErrorHandler.DefaultExceptionStrategy { 74 @Override 75 public boolean isFatal(Throwable t) { 76 /** 77 * Check exception and decide to re-queue or not. 
78 * default is set to not re-queue in spring application property default-requeue-rejected: false 79 */ 80 return super.isFatal(t); 81 } 82 } 83} 84 RPC\n1package com.demo.project78.rpc.config; 2 3import com.demo.project78.config.AmqpConfig; 4import org.springframework.amqp.core.Binding; 5import org.springframework.amqp.core.BindingBuilder; 6import org.springframework.amqp.core.Exchange; 7import org.springframework.amqp.core.ExchangeBuilder; 8import org.springframework.amqp.core.Queue; 9import org.springframework.amqp.core.QueueBuilder; 10import org.springframework.context.annotation.Bean; 11import org.springframework.context.annotation.Configuration; 12 13@Configuration 14public class RpcConfig { 15 16 @Bean 17 public Queue rpcQueue() { 18 return QueueBuilder.durable(AmqpConfig.RPC_QUEUE).build(); 19 } 20 21 @Bean 22 Exchange rpcExchange() { 23 return ExchangeBuilder.directExchange(AmqpConfig.RPC_EXCHANGE).durable(true).build(); 24 } 25 26 @Bean 27 Binding binding(Queue rpcQueue, Exchange rpcExchange) { 28 return BindingBuilder.bind(rpcQueue).to(rpcExchange).with(AmqpConfig.RPC_KEY).noargs(); 29 } 30 31} 32 1package com.demo.project78.rpc.client; 2 3import com.demo.project78.config.AmqpConfig; 4import com.demo.project78.domain.Customer; 5import lombok.RequiredArgsConstructor; 6import lombok.extern.slf4j.Slf4j; 7import org.springframework.amqp.rabbit.core.RabbitTemplate; 8import org.springframework.stereotype.Component; 9 10@Component 11@RequiredArgsConstructor 12@Slf4j 13public class RpcClient { 14 15 private final RabbitTemplate rabbitTemplate; 16 17 public String send(Customer customer, String routingKey) { 18 rabbitTemplate.setReplyTimeout(60000); 19 log.info(\u0026#34;RPC Call with key: {} Sent to Exchange: {}\u0026#34;, routingKey, customer); 20 String response = (String) rabbitTemplate.convertSendAndReceive(AmqpConfig.RPC_EXCHANGE, routingKey, customer); 21 log.info(\u0026#34;RPC Call got \u0026#39;{}\u0026#39;\u0026#34;, response); 22 return response; 23 } 
24 25} 1package com.demo.project78.rpc.server; 2 3import com.demo.project78.config.AmqpConfig; 4import com.demo.project78.domain.Customer; 5import lombok.SneakyThrows; 6import lombok.extern.slf4j.Slf4j; 7import org.springframework.amqp.rabbit.annotation.RabbitListener; 8import org.springframework.stereotype.Component; 9 10import java.util.concurrent.TimeUnit; 11 12@Component 13@Slf4j 14public class RpcServer { 15 16 @SneakyThrows 17 @RabbitListener(queues = AmqpConfig.RPC_QUEUE) 18 public String receive(Customer customer) { 19 log.info(\u0026#34;{} Received {}\u0026#34;, AmqpConfig.RPC_QUEUE, customer); 20 TimeUnit.SECONDS.sleep(2); 21 return \u0026#34;Hello world, \u0026#34; + customer.getName(); 22 } 23 24} Rest\n1package com.demo.project78.controller; 2 3import com.demo.project78.config.AmqpConfig; 4import com.demo.project78.domain.Customer; 5import com.demo.project78.exchange.ExchangeSender; 6import com.demo.project78.fanout.FanoutSender; 7import com.demo.project78.queue.QueueSender; 8import com.demo.project78.rpc.client.RpcClient; 9import com.demo.project78.topic.TopicSender; 10import lombok.RequiredArgsConstructor; 11import org.springframework.http.ResponseEntity; 12import org.springframework.web.bind.annotation.GetMapping; 13import org.springframework.web.bind.annotation.RestController; 14 15@RestController 16@RequiredArgsConstructor 17public class HomeController { 18 19 final QueueSender queueSender; 20 final ExchangeSender exchangeSender; 21 final FanoutSender fanoutSender; 22 final TopicSender topicSender; 23 final RpcClient rpcClient; 24 25 @GetMapping(\u0026#34;/simple\u0026#34;) 26 public ResponseEntity simple() { 27 Customer customer = Customer.builder() 28 .name(\u0026#34;Jack\u0026#34;) 29 .age(35) 30 .build(); 31 //Simple object sent to queue 32 queueSender.send(customer); 33 return ResponseEntity.ok().build(); 34 } 35 36 @GetMapping(\u0026#34;/direct\u0026#34;) 37 public ResponseEntity directExchange() { 38 Customer customer = Customer.builder() 
39 .name(\u0026#34;Adam\u0026#34;) 40 .age(40) 41 .build(); 42 //Will be received by the queue 43 exchangeSender.send(customer, AmqpConfig.DIRECT_ROUTING_KEY); 44 //Will not be received by any queue 45 exchangeSender.send(customer, \u0026#34;\u0026#34;); 46 return ResponseEntity.ok().build(); 47 } 48 49 @GetMapping(\u0026#34;/fanout\u0026#34;) 50 public ResponseEntity fanOut() { 51 Customer customer1 = Customer.builder() 52 .name(\u0026#34;Raj\u0026#34;) 53 .age(30) 54 .build(); 55 Customer customer2 = Customer.builder() 56 .name(\u0026#34;David\u0026#34;) 57 .age(32) 58 .build(); 59 60 //All queue registered will receive message irrespective of routing key 61 fanoutSender.send(customer1, AmqpConfig.FANOUT_KEY1); 62 fanoutSender.send(customer2, AmqpConfig.FANOUT_KEY2); 63 return ResponseEntity.ok().build(); 64 } 65 66 @GetMapping(\u0026#34;/topic\u0026#34;) 67 public ResponseEntity topicExchange() { 68 Customer customer = Customer.builder() 69 .name(\u0026#34;David\u0026#34;) 70 .age(32) 71 .build(); 72 //This will goto the booking queue only. 
73 topicSender.send(customer, \u0026#34;status.booking.confirmed\u0026#34;); 74 //This will goto both the queue as it has booking + reward key 75 topicSender.send(customer, \u0026#34;status.booking.reward\u0026#34;); 76 return ResponseEntity.ok().build(); 77 } 78 79 @GetMapping(\u0026#34;/error\u0026#34;) 80 public ResponseEntity error() { 81 Customer customer = Customer.builder() 82 .name(\u0026#34;NO_NAME\u0026#34;) 83 .age(35) 84 .build(); 85 queueSender.send(customer); 86 return ResponseEntity.ok().build(); 87 } 88 89 @GetMapping(\u0026#34;/rpc\u0026#34;) 90 public ResponseEntity\u0026lt;String\u0026gt; rpc() { 91 Customer customer = Customer.builder() 92 .name(\u0026#34;Adam\u0026#34;) 93 .age(40) 94 .build(); 95 String response = rpcClient.send(customer, AmqpConfig.RPC_KEY); 96 return ResponseEntity.ok(response); 97 } 98} Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project 78 2 3Spring \u0026amp; RabbitMQ 4 5[https://gitorko.github.io/spring-amqp/](https://gitorko.github.io/spring-amqp/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 21.0.3 2024-04-16 LTS 14``` 15 16### RabbitMQ 17 18Run the docker command to start a rabbitmq instance 19 20```bash 21docker run -d --hostname my-rabbit --name my-rabbit -e RABBITMQ_DEFAULT_USER=guest -e RABBITMQ_DEFAULT_PASS=guest -p 8085:15672 -p 5672:5672 rabbitmq:3-management 22``` 23 24Open the rabbitmq console 25 26[http://localhost:8085](http://localhost:8085) 27 28``` 29user:guest 30pwd: guest 31``` 32 33### Dev 34 35```bash 36./gradlew bootRun 37``` References https://spring.io/projects/spring-amqp\nhttps://www.rabbitmq.com/tutorials/tutorial-six-spring-amqp.html\n","link":"https://gitorko.github.io/post/spring-rabbitmq/","section":"post","tags":["spring","spring-boot","rabbitmq","amqp","rpc"],"title":"Spring - RabbitMQ"},{"body":"Java based implementation of a state machine \u0026amp; spring library based implementation of state machine. 
A state machine lets you move to different states based on events; you can also have listeners registered that get notified on state change events and carry out certain actions.
RuntimeException(\u0026#34;ERROR\u0026#34;); 34 35 } 36} 37 38@Data 39@Builder 40class StateMachineContext { 41 State state; 42 ShoppingCartState id; 43 NotifyListener notifyListener; 44 45 public void sendEvent(ShoppingCartEvent event) { 46 state.nextState(this, event); 47 notifyListener.notifyObservers(this.state.getClass().getSimpleName()); 48 } 49} 50 51enum ShoppingCartState { 52 BEGIN_STATE, 53 SHOPPING_STATE, 54 PAYMENT_STATE, 55 SHIPPED_STATE; 56} 57 58enum ShoppingCartEvent { 59 ADD_ITEM, 60 MAKE_PAYMENT, 61 PAYMENT_SUCESS, 62 PAYMENT_FAIL; 63} 64 65 66interface State { 67 void nextState(StateMachineContext stateMachine, ShoppingCartEvent event); 68} 69 70@Data 71class BeginState implements State { 72 public ShoppingCartState id = ShoppingCartState.BEGIN_STATE; 73 74 @Override 75 public void nextState(StateMachineContext stateMachine, ShoppingCartEvent event) { 76 switch (event) { 77 case ADD_ITEM: { 78 ShoppingState nextState = new ShoppingState(); 79 stateMachine.setState(nextState); 80 stateMachine.setId(nextState.id); 81 break; 82 } 83 default: 84 throw new UnsupportedOperationException(\u0026#34;Not Supported!\u0026#34;); 85 } 86 } 87} 88 89@Data 90class ShoppingState implements State { 91 ShoppingCartState id = ShoppingCartState.SHOPPING_STATE; 92 93 @Override 94 public void nextState(StateMachineContext stateMachine, ShoppingCartEvent event) { 95 switch (event) { 96 case ADD_ITEM: { 97 ShoppingState nextState = new ShoppingState(); 98 stateMachine.setState(nextState); 99 stateMachine.setId(nextState.id); 100 break; 101 } 102 case MAKE_PAYMENT: { 103 PaymentState nextState = new PaymentState(); 104 stateMachine.setState(nextState); 105 stateMachine.setId(nextState.id); 106 break; 107 } 108 default: 109 throw new UnsupportedOperationException(\u0026#34;Not Supported!\u0026#34;); 110 } 111 } 112} 113 114@Data 115class PaymentState implements State { 116 ShoppingCartState id = ShoppingCartState.PAYMENT_STATE; 117 118 @Override 119 public void 
nextState(StateMachineContext stateMachine, ShoppingCartEvent event) { 120 switch (event) { 121 case PAYMENT_SUCESS: { 122 ShippedState nextState = new ShippedState(); 123 stateMachine.setState(nextState); 124 stateMachine.setId(nextState.id); 125 break; 126 } 127 case PAYMENT_FAIL: 128 ShoppingState nextState = new ShoppingState(); 129 stateMachine.setState(nextState); 130 stateMachine.setId(nextState.id); 131 break; 132 default: 133 throw new UnsupportedOperationException(\u0026#34;Not Supported!\u0026#34;); 134 } 135 } 136} 137 138@Data 139class ShippedState implements State { 140 ShoppingCartState id = ShoppingCartState.SHIPPED_STATE; 141 142 @Override 143 public void nextState(StateMachineContext stateMachine, ShoppingCartEvent event) { 144 throw new UnsupportedOperationException(\u0026#34;Not Supported!\u0026#34;); 145 } 146} 147 148interface Observer { 149 public void notify(String message); 150} 151 152class ShippedEventObserver implements Observer { 153 @Override 154 public void notify(String message) { 155 if (message.startsWith(\u0026#34;ShippedState\u0026#34;)) { 156 //This observer is interested only in shipped events. 
157 System.out.println(\u0026#34;ShippedEventObserver got Message: \u0026#34; + message); 158 } 159 } 160} 161 162interface Subject { 163 public void registerObserver(Observer observer); 164 165 public void notifyObservers(String tick); 166} 167 168class NotifyListener implements Subject { 169 List\u0026lt;Observer\u0026gt; notifyList = new ArrayList\u0026lt;\u0026gt;(); 170 171 @Override 172 public void registerObserver(Observer observer) { 173 notifyList.add(observer); 174 } 175 176 @Override 177 public void notifyObservers(String message) { 178 notifyList.forEach(e -\u0026gt; e.notify(message)); 179 } 180} We can also use the spring state machine libraries\n1package com.demo.project77.spring; 2 3import java.util.EnumSet; 4 5import lombok.extern.slf4j.Slf4j; 6import org.springframework.boot.CommandLineRunner; 7import org.springframework.boot.SpringApplication; 8import org.springframework.boot.autoconfigure.SpringBootApplication; 9import org.springframework.context.annotation.Bean; 10import org.springframework.context.annotation.Configuration; 11import org.springframework.messaging.Message; 12import org.springframework.messaging.support.MessageBuilder; 13import org.springframework.statemachine.StateMachine; 14import org.springframework.statemachine.action.Action; 15import org.springframework.statemachine.config.EnableStateMachineFactory; 16import org.springframework.statemachine.config.EnumStateMachineConfigurerAdapter; 17import org.springframework.statemachine.config.StateMachineFactory; 18import org.springframework.statemachine.config.builders.StateMachineConfigurationConfigurer; 19import org.springframework.statemachine.config.builders.StateMachineStateConfigurer; 20import org.springframework.statemachine.config.builders.StateMachineTransitionConfigurer; 21import org.springframework.statemachine.listener.StateMachineListenerAdapter; 22import org.springframework.statemachine.state.State; 23import reactor.core.publisher.Mono; 24 25@SpringBootApplication 26@Slf4j 
27public class Application { 28 29 public static void main(String[] args) { 30 SpringApplication.run(Application.class, args); 31 } 32 33 @Bean 34 public CommandLineRunner testStateMachine(StateMachineFactory\u0026lt;ShoppingCartState, ShoppingCartEvent\u0026gt; stateMachineFactory) { 35 return args -\u0026gt; { 36 StateMachine\u0026lt;ShoppingCartState, ShoppingCartEvent\u0026gt; stateMachine = stateMachineFactory.getStateMachine( 37 \u0026#34;mymachine\u0026#34;); 38 stateMachine.sendEvent(getEventMessage(ShoppingCartEvent.ADD_ITEM)).subscribe(); 39 if (!(stateMachine.getState().getId().equals(ShoppingCartState.SHOPPING_STATE))) 40 throw new RuntimeException(\u0026#34;ERROR\u0026#34;); 41 stateMachine.sendEvent(getEventMessage(ShoppingCartEvent.ADD_ITEM)).subscribe(); 42 if (!(stateMachine.getState().getId().equals(ShoppingCartState.SHOPPING_STATE))) 43 throw new RuntimeException(\u0026#34;ERROR\u0026#34;); 44 stateMachine.sendEvent(getEventMessage(ShoppingCartEvent.MAKE_PAYMENT)).subscribe(); 45 if (!(stateMachine.getState().getId().equals(ShoppingCartState.PAYMENT_STATE))) 46 throw new RuntimeException(\u0026#34;ERROR\u0026#34;); 47 stateMachine.sendEvent(getEventMessage(ShoppingCartEvent.PAYMENT_FAIL)).subscribe(); 48 if (!(stateMachine.getState().getId().equals(ShoppingCartState.SHOPPING_STATE))) 49 throw new RuntimeException(\u0026#34;ERROR\u0026#34;); 50 stateMachine.sendEvent(getEventMessage(ShoppingCartEvent.MAKE_PAYMENT)).subscribe(); 51 if (!(stateMachine.getState().getId().equals(ShoppingCartState.PAYMENT_STATE))) 52 throw new RuntimeException(\u0026#34;ERROR\u0026#34;); 53 stateMachine.sendEvent(getEventMessage(ShoppingCartEvent.PAYMENT_SUCESS)).subscribe(); 54 if (!(stateMachine.getState().getId().equals(ShoppingCartState.SHIPPED_STATE))) 55 throw new RuntimeException(\u0026#34;ERROR\u0026#34;); 56 log.info(\u0026#34;Final State: {}\u0026#34;, stateMachine.getState().getId()); 57 }; 58 } 59 60 private 
Mono\u0026lt;Message\u0026lt;ShoppingCartEvent\u0026gt;\u0026gt; getEventMessage(ShoppingCartEvent event) { 61 return Mono.just(MessageBuilder.withPayload(event).build()); 62 } 63} 64 65enum ShoppingCartEvent { 66 ADD_ITEM, 67 MAKE_PAYMENT, 68 PAYMENT_SUCESS, 69 PAYMENT_FAIL 70} 71 72enum ShoppingCartState { 73 BEGIN_STATE, 74 SHOPPING_STATE, 75 PAYMENT_STATE, 76 SHIPPED_STATE; 77} 78 79@Configuration 80@EnableStateMachineFactory 81@Slf4j 82class ShoppingStateMachineConfig extends EnumStateMachineConfigurerAdapter\u0026lt;ShoppingCartState, ShoppingCartEvent\u0026gt; { 83 84 @Override 85 public void configure(StateMachineStateConfigurer\u0026lt;ShoppingCartState, ShoppingCartEvent\u0026gt; states) throws Exception { 86 states 87 .withStates() 88 .initial(ShoppingCartState.BEGIN_STATE) 89 .end(ShoppingCartState.SHIPPED_STATE) 90 .states(EnumSet.allOf(ShoppingCartState.class)); 91 } 92 93 @Override 94 public void configure(StateMachineTransitionConfigurer\u0026lt;ShoppingCartState, ShoppingCartEvent\u0026gt; transitions) 95 throws Exception { 96 transitions 97 .withExternal() 98 .source(ShoppingCartState.BEGIN_STATE) 99 .target(ShoppingCartState.SHOPPING_STATE) 100 .event(ShoppingCartEvent.ADD_ITEM) 101 .action(initAction()) 102 .and() 103 .withExternal() 104 .source(ShoppingCartState.SHOPPING_STATE) 105 .target(ShoppingCartState.SHOPPING_STATE) 106 .event(ShoppingCartEvent.ADD_ITEM) 107 .and() 108 .withExternal() 109 .source(ShoppingCartState.SHOPPING_STATE) 110 .target(ShoppingCartState.PAYMENT_STATE) 111 .event(ShoppingCartEvent.MAKE_PAYMENT) 112 .and() 113 .withExternal() 114 .source(ShoppingCartState.PAYMENT_STATE) 115 .target(ShoppingCartState.SHIPPED_STATE) 116 .event(ShoppingCartEvent.PAYMENT_SUCESS) 117 .and() 118 .withExternal() 119 .source(ShoppingCartState.PAYMENT_STATE) 120 .target(ShoppingCartState.SHOPPING_STATE) 121 .event(ShoppingCartEvent.PAYMENT_FAIL); 122 } 123 124 @Override 125 public void 
configure(StateMachineConfigurationConfigurer\u0026lt;ShoppingCartState, ShoppingCartEvent\u0026gt; config) 126 throws Exception { 127 config 128 .withConfiguration() 129 .autoStartup(true) 130 .listener(new GlobalStateMachineListener()); 131 } 132 133 @Bean 134 public Action\u0026lt;ShoppingCartState, ShoppingCartEvent\u0026gt; initAction() { 135 log.info(\u0026#34;init action called!\u0026#34;); 136 return ctx -\u0026gt; log.info(\u0026#34;Id: {}\u0026#34;, ctx.getTarget().getId()); 137 } 138} 139 140@Slf4j 141class GlobalStateMachineListener extends StateMachineListenerAdapter\u0026lt;ShoppingCartState, ShoppingCartEvent\u0026gt; { 142 @Override 143 public void stateChanged(State\u0026lt;ShoppingCartState, ShoppingCartEvent\u0026gt; from, State\u0026lt;ShoppingCartState, ShoppingCartEvent\u0026gt; to) { 144 log.info(\u0026#34;State changed to : {}\u0026#34;, to.getId()); 145 } 146} Setup 1# Project 77 2 3Java \u0026amp; Spring based State Machine 4 5[https://gitorko.github.io/state-machine/](https://gitorko.github.io/state-machine/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14``` 15 16### Dev 17 18To run the code. 
19 20```bash 21./gradlew clean build 22./gradlew bootRun 23``` References https://spring.io/projects/spring-statemachine\n","link":"https://gitorko.github.io/post/state-machine/","section":"post","tags":["design-pattern","state-machine"],"title":"State Machine"},{"body":"","link":"https://gitorko.github.io/tags/state-machine/","section":"tags","tags":null,"title":"State-Machine"},{"body":"Producer consumer problem implementations\nGithub: https://github.com/gitorko/project01\nProducer Consumer Producer consumer using ArrayBlockingQueue.\n1package com.demo.basics.concurrency._04_producerconsumer; 2 3import java.util.concurrent.ArrayBlockingQueue; 4import java.util.concurrent.BlockingQueue; 5 6import lombok.SneakyThrows; 7import org.junit.jupiter.api.Test; 8 9/** 10 * [Produce Consumer - EASY]() 11 * 12 * - blocking queue 13 */ 14public class ProduceConsumer { 15 16 @SneakyThrows 17 @Test 18 public void test() { 19 // BlockingQueue\u0026lt;String\u0026gt; queue = new SynchronousQueue\u0026lt;\u0026gt;(); 20 BlockingQueue\u0026lt;String\u0026gt; queue = new ArrayBlockingQueue\u0026lt;\u0026gt;(5); 21 22 Runnable producer = () -\u0026gt; { 23 for (int i = 0; i \u0026lt; 20; i++) { 24 try { 25 queue.put(String.valueOf(i)); 26 System.out.println(\u0026#34;Published: \u0026#34; + i); 27 } catch (InterruptedException e) { 28 e.printStackTrace(); 29 } 30 } 31 try { 32 queue.put(\u0026#34;END\u0026#34;); 33 } catch (InterruptedException e) { 34 e.printStackTrace(); 35 } 36 37 }; 38 39 Runnable consumer = () -\u0026gt; { 40 while (true) { 41 try { 42 //TimeUnit.SECONDS.sleep(3); 43 String val = queue.take(); 44 if (val.equals(\u0026#34;END\u0026#34;)) break; 45 System.out.println(\u0026#34;Consumed: \u0026#34; + val); 46 } catch (InterruptedException e) { 47 e.printStackTrace(); 48 } 49 } 50 }; 51 52 Thread p = new Thread(producer); 53 Thread c = new Thread(consumer); 54 p.start(); 55 c.start(); 56 System.out.println(\u0026#34;Producer and Consumer has been 
started\u0026#34;); 57 p.join(); 58 c.join(); 59 System.out.println(\u0026#34;Completed\u0026#34;); 60 } 61} Producer consumer using wait notify.\n1package com.demo.basics.concurrency._04_producerconsumer; 2 3import java.util.LinkedList; 4import java.util.Queue; 5 6import lombok.SneakyThrows; 7import org.junit.jupiter.api.Test; 8 9/** 10 * [Produce Consumer - EASY]() 11 * 12 * - wait \u0026amp; notify 13 */ 14public class PCWaitNotify { 15 16 @SneakyThrows 17 @Test 18 public void test() { 19 MyBlockingQueue\u0026lt;String\u0026gt; queue = new MyBlockingQueue\u0026lt;\u0026gt;(); 20 Runnable producer = () -\u0026gt; { 21 for (int i = 0; i \u0026lt; 20; i++) { 22 queue.put(String.valueOf(i)); 23 System.out.println(\u0026#34;Published: \u0026#34; + i); 24 } 25 queue.put(\u0026#34;END\u0026#34;); 26 }; 27 28 Runnable consumer = () -\u0026gt; { 29 while (true) { 30 String val = queue.take(); 31 if (val.equals(\u0026#34;END\u0026#34;)) break; 32 System.out.println(\u0026#34;Consumed: \u0026#34; + val); 33 } 34 }; 35 36 Thread p = new Thread(producer); 37 Thread c = new Thread(consumer); 38 p.start(); 39 c.start(); 40 System.out.println(\u0026#34;Producer and Consumer has been started\u0026#34;); 41 p.join(); 42 c.join(); 43 System.out.println(\u0026#34;Completed\u0026#34;); 44 } 45 46 class MyBlockingQueue\u0026lt;E\u0026gt; { 47 private Queue\u0026lt;E\u0026gt; queue = new LinkedList\u0026lt;\u0026gt;(); 48 private int size = 5; 49 50 public void put(E e) { 51 synchronized (queue) { 52 try { 53 if (queue.size() == size) { 54 queue.wait(); 55 } 56 queue.add(e); 57 queue.notifyAll(); 58 } catch (InterruptedException ex) { 59 ex.printStackTrace(); 60 } 61 } 62 } 63 64 public E take() { 65 synchronized (queue) { 66 try { 67 while (queue.size() == 0) { 68 queue.wait(); 69 } 70 E item = queue.remove(); 71 queue.notifyAll(); 72 return item; 73 } catch (InterruptedException e) { 74 e.printStackTrace(); 75 return null; 76 } 77 } 78 } 79 } 80} 81 82 Producer Consumer using 
locks\n1package com.demo.basics.concurrency._04_producerconsumer; 2 3import java.util.LinkedList; 4import java.util.Queue; 5import java.util.concurrent.CountDownLatch; 6import java.util.concurrent.ExecutorService; 7import java.util.concurrent.Executors; 8import java.util.concurrent.locks.Condition; 9import java.util.concurrent.locks.Lock; 10import java.util.concurrent.locks.ReentrantLock; 11 12import lombok.SneakyThrows; 13import org.junit.jupiter.api.Test; 14 15/** 16 * [Produce Consumer - EASY]() 17 * 18 * - locks 19 */ 20public class PCLock { 21 22 @SneakyThrows 23 @Test 24 public void test() { 25 26 MyBlockingQueue\u0026lt;String\u0026gt; queue = new MyBlockingQueue\u0026lt;\u0026gt;(); 27 ExecutorService executor = Executors.newFixedThreadPool(5); 28 CountDownLatch latch = new CountDownLatch(2); 29 Runnable producer = () -\u0026gt; { 30 try { 31 for (int i = 0; i \u0026lt; 20; i++) { 32 queue.put(String.valueOf(i)); 33 System.out.println(\u0026#34;Published: \u0026#34; + i); 34 } 35 queue.put(\u0026#34;END\u0026#34;); 36 } finally { 37 latch.countDown(); 38 } 39 }; 40 41 Runnable consumer = () -\u0026gt; { 42 try { 43 while (true) { 44 //TimeUnit.SECONDS.sleep(3); 45 String val = queue.take(); 46 if (val.equals(\u0026#34;END\u0026#34;)) break; 47 System.out.println(\u0026#34;Consumed: \u0026#34; + val); 48 } 49 } catch (Exception ex) { 50 //Do Nothing 51 } finally { 52 latch.countDown(); 53 } 54 }; 55 executor.submit(producer); 56 executor.submit(consumer); 57 latch.await(); 58 59 } 60 61 class MyBlockingQueue\u0026lt;E\u0026gt; { 62 private Queue\u0026lt;E\u0026gt; queue = new LinkedList\u0026lt;\u0026gt;(); 63 private int size = 5; 64 private Lock lock = new ReentrantLock(true); 65 private Condition notFull = lock.newCondition(); 66 private Condition notEmpty = lock.newCondition(); 67 68 public void put(E e) { 69 lock.lock(); 70 try { 71 if (queue.size() == size) { 72 notFull.await(); 73 } 74 queue.add(e); 75 notEmpty.signalAll(); 76 } catch 
(InterruptedException ex) { 77 ex.printStackTrace(); 78 } finally { 79 lock.unlock(); 80 } 81 } 82 83 public E take() { 84 lock.lock(); 85 try { 86 while (queue.size() == 0) { 87 notEmpty.await(); 88 } 89 E item = queue.remove(); 90 notFull.signalAll(); 91 return item; 92 } catch (InterruptedException e) { 93 e.printStackTrace(); 94 return null; 95 } finally { 96 lock.unlock(); 97 } 98 } 99 } 100} 101 102 ","link":"https://gitorko.github.io/post/producer-consumer/","section":"post","tags":["design-pattern"],"title":"Producer Consumer"},{"body":"","link":"https://gitorko.github.io/tags/h2/","section":"tags","tags":null,"title":"H2"},{"body":"The N+1 query problem occurs when the framework executes N additional SQL statements to load lazily fetched objects, this happens when you use FetchType.LAZY for your entity associations.\nGithub: https://github.com/gitorko/project66\nN+1 problem By default, fetch is FetchType.LAZY in hibernate, changing to FetchType.EAGER won't guarantee a fix for N+1 issue either, eager fetch will fetch more data than needed. The @ManyToOne and @OneToOne associations use FetchType.EAGER by default.\nDifferent ways to solve N+1 Problem\nFetchType.EAGER - Fetches more data that you need Join fetch - joins the two tables \u0026amp; initializes the objects. It works with both JOIN and LEFT JOIN statements. JPA EntityGraphs - allows partial or specified fetching of objects, specify a fetch plan by EntityGraphs in order to determine which fields or properties should be fetched together Batching - @BatchSize(size = 10) Subselect - @Fetch(FetchMode.SUBSELECT) Spring Data JDBC supports Single Query Loading Fetch Graph vs Load Graph\nThere are two types of EntityGraphs, Fetch and Load, they define if the entities not specified by attributeNodes of EntityGraphs should be fetched lazily or eagerly.\nFETCH - default graph type. 
When it is selected, the attributes that are specified by attribute nodes of the entity graph are treated as FetchType.EAGER and attributes that are not specified are treated as FetchType.LAZY LOAD - attributes that are specified by attribute nodes of the entity graph are treated as FetchType.EAGER More complex and reusable graphs we can describe a fetch plan with its paths and boundaries with @NamedEntityGraph annotation in the entity class.\nCode N+1 query that executes N times\n1select c1_0.post1_id,c1_1.id,c1_1.comment from \u0026#34;post1_comments\u0026#34; c1_0 join \u0026#34;post-comment1\u0026#34; c1_1 on c1_1.id=c1_0.\u0026#34;comments_id\u0026#34; where c1_0.post1_id=? Sql with left join\n1select p1_0.id,c1_0.post1_id,c1_1.id,c1_1.comment,p1_0.title from post1 p1_0 left join \u0026#34;post1_comments\u0026#34; c1_0 on p1_0.id=c1_0.post1_id left join \u0026#34;post-comment1\u0026#34; c1_1 on c1_1.id=c1_0.\u0026#34;comments_id\u0026#34; Sql with join\n1select p1_0.id,c1_0.post1_id,c1_1.id,c1_1.comment,p1_0.title from post1 p1_0 join \u0026#34;post1_comments\u0026#34; c1_0 on p1_0.id=c1_0.post1_id join \u0026#34;post-comment1\u0026#34; c1_1 on c1_1.id=c1_0.\u0026#34;comments_id\u0026#34; Entity Graph\n1 select p1_0.id,c1_0.post4_id,c1_1.id,c1_1.comment,p1_0.title from post4 p1_0 left join \u0026#34;post4_comments\u0026#34; c1_0 on p1_0.id=c1_0.post4_id left join \u0026#34;post-comment4\u0026#34; c1_1 on c1_1.id=c1_0.\u0026#34;comments_id\u0026#34; Batch\n1select c1_0.post2_id,c1_1.id,c1_1.comment from \u0026#34;post2_comments\u0026#34; c1_0 join \u0026#34;post-comment2\u0026#34; c1_1 on c1_1.id=c1_0.\u0026#34;comments_id\u0026#34; where c1_0.post2_id in (?,?,?,?,?,?,?,?,?,?) 
Sub-Select\n1select c1_0.post3_id,c1_1.id,c1_1.comment from \u0026#34;post3_comments\u0026#34; c1_0 join \u0026#34;post-comment3\u0026#34; c1_1 on c1_1.id=c1_0.\u0026#34;comments_id\u0026#34; where c1_0.post3_id in (select p1_0.id from post3 p1_0) 1package com.demo.project66; 2 3import com.demo.project66.service.PostService; 4import lombok.extern.slf4j.Slf4j; 5import org.springframework.boot.CommandLineRunner; 6import org.springframework.boot.SpringApplication; 7import org.springframework.boot.autoconfigure.SpringBootApplication; 8import org.springframework.context.annotation.Bean; 9 10@SpringBootApplication 11@Slf4j 12public class Main { 13 14 public static void main(String[] args) { 15 SpringApplication.run(Main.class, args); 16 } 17 18 @Bean 19 public CommandLineRunner commandLineRunner(PostService postService) { 20 return (args) -\u0026gt; { 21 log.info(\u0026#34;Seeding data!\u0026#34;); 22 postService.seedData(); 23 }; 24 } 25 26} 1spring: 2 jpa: 3 hibernate: 4 ddl-auto: create-drop 5 show-sql: true 6logging: 7 level: 8 org.springframework.orm.jpa: DEBUG 9 org.hibernate.event.internal: TRACE 10 org.hibernate.internal.SessionImpl: TRACE Postman Import the postman collection to postman\nPostman Collection\nSetup 1# Project 66 2 3Spring Data JPA N+1 4 5[https://gitorko.github.io/spring-jpa-n-plus-1/](https://gitorko.github.io/spring-jpa-n-plus-1/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 21.0.3 2024-04-16 LTS 14``` 15 16### Dev 17 18To run the code. 
19 20 ```bash 21./gradlew clean build 22./gradlew bootRun 23``` References https://vladmihalcea.com/n-plus-1-query-problem\n","link":"https://gitorko.github.io/post/spring-jpa-n-plus-1/","section":"post","tags":["spring","jpa","h2"],"title":"Spring JPA N+1"},{"body":"Spring Boot with Micrometer \u0026amp; Integration with Prometheus \u0026amp; Grafana.\nGithub: https://github.com/gitorko/project68\nMicrometer Micrometer provides vendor neutral application metrics facade that can integrate with various monitoring systems like Prometheus, Wavefront, Atlas, Datadog, Graphite, Ganglia, Influx, JMX etc.\nTraditional systems which monitored JMX attributes could only do so at a particular instance of time. With the arrival of time series database we can now use that data and visualize it over a period in time. Writing the integration to various monitoring systems is time consuming, hence micrometer simplifies it. Underlying metrics are exposed by Spring Boot Actuator and then Micrometer provides a facade that can be used to either push or pull metrics to monitoring systems.\nEvery meter has a name (hierarchical) and tag. There are 4 main types of meters.\nTimers - Time taken to run something. Counter - Number of times something was run. Gauges - Report data when observed. Gauges can be useful when monitoring stats of cache, collections Distribution summary - Distribution of events. 
MeterRegistryCustomizer, you can customize the whole set of registries at once or individual implementation.\nCode 1package com.demo.project68; 2 3import io.micrometer.core.annotation.Timed; 4import io.micrometer.core.instrument.Counter; 5import io.micrometer.core.instrument.MeterRegistry; 6import io.micrometer.core.instrument.Metrics; 7import jakarta.annotation.PostConstruct; 8import lombok.extern.slf4j.Slf4j; 9import org.springframework.beans.factory.annotation.Value; 10import org.springframework.boot.SpringApplication; 11import org.springframework.boot.actuate.autoconfigure.metrics.MeterRegistryCustomizer; 12import org.springframework.boot.autoconfigure.SpringBootApplication; 13import org.springframework.context.annotation.Bean; 14import org.springframework.context.annotation.Configuration; 15import org.springframework.context.annotation.EnableAspectJAutoProxy; 16import org.springframework.web.bind.annotation.GetMapping; 17import org.springframework.web.bind.annotation.RequestMapping; 18import org.springframework.web.bind.annotation.RestController; 19 20import java.util.Random; 21import java.util.concurrent.TimeUnit; 22 23@SpringBootApplication 24@Slf4j 25public class Main { 26 public static void main(String[] args) { 27 SpringApplication.run(Main.class, args); 28 } 29} 30 31@RestController 32@RequestMapping(\u0026#34;/api\u0026#34;) 33@Slf4j 34class AppController { 35 36 @Timed(\u0026#34;hello.api.time\u0026#34;) 37 @GetMapping(\u0026#34;/hello\u0026#34;) 38 public String sayHello() throws InterruptedException { 39 RegistryConfig.helloApiCounter.increment(); 40 int sleepTime = new Random().nextInt(10); 41 log.info(\u0026#34;Sleeping for seconds: {}\u0026#34;, sleepTime); 42 TimeUnit.SECONDS.sleep(sleepTime); 43 return \u0026#34;Hello, Sleep for \u0026#34; + sleepTime + \u0026#34; Seconds!\u0026#34;; 44 } 45} 46 47@Configuration 48@EnableAspectJAutoProxy 49class RegistryConfig { 50 51 public static Counter helloApiCounter; 52 53 @Bean 54 
MeterRegistryCustomizer\u0026lt;MeterRegistry\u0026gt; configurer(@Value(\u0026#34;${spring.application.name}\u0026#34;) String applicationName) { 55 return registry -\u0026gt; registry.config().commonTags(\u0026#34;application\u0026#34;, applicationName); 56 } 57 58 @PostConstruct 59 public void postInit() { 60 helloApiCounter = Metrics.counter(\u0026#34;hello.api.count\u0026#34;, \u0026#34;type\u0026#34;, \u0026#34;order\u0026#34;); 61 } 62} 1server: 2 port: 8080 3management: 4 metrics: 5 export: 6 prometheus: 7 enabled: true 8 endpoints: 9 web: 10 exposure: 11 include: \u0026#34;*\u0026#34; 12 endpoint: 13 metrics: 14 enabled: true 15 prometheus: 16 enabled: true 17 metrics.enabled: true 18spring: 19 application: 20 name: myapp 1global: 2 scrape_interval: 10s 3 scrape_timeout: 5s 4 evaluation_interval: 10s 5alerting: 6 alertmanagers: 7 - static_configs: 8 - targets: [] 9 scheme: http 10 timeout: 10s 11scrape_configs: 12 - job_name: myapp 13 scrape_interval: 10s 14 scrape_timeout: 5s 15 metrics_path: /actuator/prometheus 16 scheme: http 17 static_configs: 18 - targets: 19 - 192.168.68.104:8080 Setup 1# Project 68 2 3Spring Boot Micrometer - Prometheus, Wavefront 4 5[https://gitorko.github.io/spring-boot-micrometer/](https://gitorko.github.io/spring-boot-micrometer/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk 17.0.3 2022-04-19 LTS 14``` 15 16### Dev 17 18To run code. 19 20```bash 21./gradlew clean build 22./gradlew bootRun 23``` 24 25## Prometheus 26 27Update the target ip-address in the prometheus.yml file, don\u0026#39;t use localhost when using docker container 28 29To start the prometheus docker instance build the docker image \u0026amp; run the image. 30 31```bash 32cd project68 33docker build -f docker/Dockerfile --force-rm -t my-prometheus . 34docker run -p 9090:9090 my-prometheus 35``` 36 37## Grafana 38 39To start the grafana docker instance run the command. 
40 41```bash 42docker run --name grafana -d -p 3000:3000 grafana/grafana 43``` Prometheus \u0026amp; Grafana Invoke the rest api couple of times.\n1curl http://localhost:8080/api/hello Check if the metrics are returned by actuator\nhttp://localhost:8080/actuator/prometheus\nYou should see metrics similar to\n1hello_api_count_total{application=\u0026#34;myapp\u0026#34;,type=\u0026#34;order\u0026#34;,} 27.0 2hello_api_time_seconds_count{application=\u0026#34;myapp\u0026#34;,exception=\u0026#34;None\u0026#34;,method=\u0026#34;GET\u0026#34;,outcome=\u0026#34;SUCCESS\u0026#34;,status=\u0026#34;200\u0026#34;,uri=\u0026#34;/api/hello\u0026#34;,} 27.0 3hello_api_time_seconds_sum{application=\u0026#34;myapp\u0026#34;,exception=\u0026#34;None\u0026#34;,method=\u0026#34;GET\u0026#34;,outcome=\u0026#34;SUCCESS\u0026#34;,status=\u0026#34;200\u0026#34;,uri=\u0026#34;/api/hello\u0026#34;,} 102.162818601 4hello_api_time_seconds_max{application=\u0026#34;myapp\u0026#34;,exception=\u0026#34;None\u0026#34;,method=\u0026#34;GET\u0026#34;,outcome=\u0026#34;SUCCESS\u0026#34;,status=\u0026#34;200\u0026#34;,uri=\u0026#34;/api/hello\u0026#34;,} 0.0 Open Prometheus url: http://localhost:9090\nPrometheus should now start pulling data from the spring application. Click on status -\u0026gt; targets on prometheus dashboard to confirm that endpoint is up.\nQuery the metric hello_api_count_total and view as graph\nThe dashboard in Prometheus is minimal, to add more complex dashboard and visualization you can look at Grafana.\nOpen Grafana url: http://localhost:3000/\nLogin, you will need to change the password on first login\n1user: admin 2password: admin Add the prometheus data source, make sure its the ip address of your system, dont add localhost\nhttp://IP-ADDRESS:9090\nThere are existing grafana dashboards that can be imported. 
Import a dashboard, Download the json file or copy the ID of the dashboard for micrometer dashboard.\nhttps://grafana.com/dashboards/4701\nCreate a custom dashboard, Add a new panel, add 'hello_api_count_total' metric in the query, save the dashboard.\nReferences https://micrometer.io/docs\nhttps://prometheus.io/\nhttps://grafana.com/\nhttps://grafana.com/grafana/dashboards/4701\nhttps://grafana.com/grafana/dashboards/\n","link":"https://gitorko.github.io/post/spring-micrometer/","section":"post","tags":["spring","spring-boot","prometheus","grafana","jmx","micrometer"],"title":"Spring - Micrometer"},{"body":"Create a blog in github using Hugo.\nHugo You need go installed\n1brew install go 2 3$go version 4go version go1.14.6 darwin/amd64 Install Hugo\n1brew install hugo Create Site Create the site\n1hugo new site myblog 2cd myblog 3git init . 4git add . 5git commit -am \u0026#34;Base Commit\u0026#34; Theme Add clarity theme, this will also bring sample templates, that you can delete/modify\n1hugo mod init myblog 2wget -O - https://github.com/chipzoller/hugo-clarity/archive/master.tar.gz | tar xz \u0026amp;\u0026amp; cp -a hugo-clarity-master/exampleSite/* . \u0026amp;\u0026amp; rm -rf hugo-clarity-master \u0026amp;\u0026amp; rm -f config.toml 3git add . 4git commit -am \u0026#34;Theme Commit\u0026#34; In the file config/_default/config.toml\nChange the theme from 'theme = \u0026quot;hugo-clarity\u0026quot;' to 'theme = [\u0026quot;github.com/chipzoller/hugo-clarity\u0026quot;]'\nStart the hugo server locally\n1hugo server Theme update To update the clarity theme to take any latest changes to themes. Need not be done frequently.\n1hugo mod clean 2hugo mod get -u github.com/chipzoller/hugo-clarity Hugo Module update If you want to update all the hugo modules to use the latest version. Need not be done frequently.\n1hugo mod clean 2hugo mod get -u ./... 
Page Bundles We will use page bundles feature where the images and post reside in same folder as its easier to manage. To enable this add this to params.toml\n1usePageBundles = true Robots.txt Enable robots.txt in config.toml for google crawler to skip certain files, be sure to put this at the beginning of the file\n1enableRobotsTXT = true If you want you can add additional files by creating a robots.txt file under layouts\n1User-agent: * 2 3Disallow: /css/ 4Disallow: /en/ 5Disallow: /docs/ 6Disallow: /fonts/ 7Disallow: /js/ 8Disallow: /tags/ 9Disallow: /icons/ 10Disallow: /images/ 11Disallow: /showcase/ 12Disallow: /categories/ 13Disallow: /search/ Disqus comments Add disqus username to config.toml to allow comments on the blog\n1disqusShortname = \u0026#34;myusername\u0026#34; Menu Bar To modify the menu edit the menu.en.toml file\nFolders If you want additional folder modify the mainSections and add other folder names\n1mainSections = [\u0026#34;post\u0026#34;] Images Images can be added like\n1![](image-01.png) Table of Contents To add table of contents add the following in each posts .md file\n1toc: true Notices To post notices use the following code\n{{% notice note \u0026quot;Note Title\u0026quot; %}} This will be the content of the note. {{% /notice %}}\nEmbed Raw Github file Create a file called ghcode.md under layouts/shortcodes\n1{{ $file := .Get 0 }} 2{{ with resources.GetRemote $file }} 3 {{ with .Err }} 4 {{ errorf \u0026#34;%s\u0026#34; . 
}} 5 {{ else }} 6 {{ $lang := path.Ext $file | strings.TrimPrefix \u0026#34;.\u0026#34; }} 7 {{ highlight .Content $lang }} 8 {{ end }} 9{{ else }} 10 {{ errorf \u0026#34;Unable to get remote resource.\u0026#34; }} 11{{ end }} To use the tag in the post\n{{\u0026lt; ghcode \u0026quot;https://raw.githubusercontent.com/..file.java\u0026quot; \u0026gt;}\nEmbed Raw Markdown file Create a file called markcode.md under layouts/shortcodes\n1{{ $file := .Get 0 }} 2{{ with resources.GetRemote $file }} 3 {{ with .Err }} 4 {{ errorf \u0026#34;%s\u0026#34; . }} 5 {{ else }} 6 {{ .Content | $.Page.RenderString }} 7 {{ end }} 8{{ else }} 9 {{ errorf \u0026#34;Unable to get remote resource.\u0026#34; }} 10{{ end }} To use the tag in the post\n{{\u0026lt; markcode \u0026quot;https://raw.githubusercontent.com/../file.md\u0026quot; \u0026gt;}}\nSitemap Hugo generates a sitemap.xml that contains tags, categories and other taxonomies. To exclude them from Google search indexing, create a sitemap.xml under layouts.\n1{{ printf \u0026#34;\u0026lt;?xml version=\\\u0026#34;1.0\\\u0026#34; encoding=\\\u0026#34;utf-8\\\u0026#34; standalone=\\\u0026#34;yes\\\u0026#34;?\u0026gt;\u0026#34; | safeHTML }} 2\u0026lt;urlset xmlns=\u0026#34;http://www.sitemaps.org/schemas/sitemap/0.9\u0026#34; 3 xmlns:xhtml=\u0026#34;http://www.w3.org/1999/xhtml\u0026#34;\u0026gt; 4 {{ $exclude := slice \u0026#34;tags\u0026#34; \u0026#34;categories\u0026#34; }} 5 {{ range .Pages }} 6 {{ if not (in $exclude .Data.Plural) }} 7 \u0026lt;url\u0026gt; 8 \u0026lt;loc\u0026gt;{{ .Permalink }}\u0026lt;/loc\u0026gt;{{ if not .Lastmod.IsZero }} 9 \u0026lt;lastmod\u0026gt;{{ safeHTML ( .Lastmod.Format \u0026#34;2006-01-02T15:04:05-07:00\u0026#34; ) }}\u0026lt;/lastmod\u0026gt;{{ end }}{{ with .Sitemap.ChangeFreq }} 10 \u0026lt;changefreq\u0026gt;{{ . 
}}\u0026lt;/changefreq\u0026gt;{{ end }}{{ if ge .Sitemap.Priority 0.0 }} 11 \u0026lt;priority\u0026gt;{{ .Sitemap.Priority }}\u0026lt;/priority\u0026gt;{{ end }}{{ if .IsTranslated }}{{ range .Translations }} 12 \u0026lt;xhtml:link 13 rel=\u0026#34;alternate\u0026#34; 14 hreflang=\u0026#34;{{ .Language.Lang }}\u0026#34; 15 href=\u0026#34;{{ .Permalink }}\u0026#34; 16 /\u0026gt;{{ end }} 17 \u0026lt;xhtml:link 18 rel=\u0026#34;alternate\u0026#34; 19 hreflang=\u0026#34;{{ .Language.Lang }}\u0026#34; 20 href=\u0026#34;{{ .Permalink }}\u0026#34; 21 /\u0026gt;{{ end }} 22 \u0026lt;/url\u0026gt; 23 {{ end }} 24 {{ end }} 25\u0026lt;/urlset\u0026gt; Start Blog Run the server\n1hugo server Github Create a Github repository, it should be of the exact format \u0026lt;GITHUB-USERNAME\u0026gt;.github.io\nWe will create 2 branches in this repository where one branch will store the markdown content and other branch will store the live html site.\nUpdate the base url in config.toml\n1baseurl = \u0026#34;https://\u0026lt;GITHUB-USERNAME\u0026gt;.github.io/\u0026#34; Blog Commit 1git remote add origin https://github.com/\u0026lt;GITHUB-USERNAME\u0026gt;/\u0026lt;GITHUB-USERNAME\u0026gt;.github.io.git 2git branch -M blog 3git push -u origin blog Now your markdown files will be present on github under the branch blog.\nGithub Actions To automatically deploy the site on each commit, first create the github token under the repository\nYou need to generate a token if you don\u0026#39;t have one already https://github.com/settings/tokens\nNote! 
The token must not be shared with anyone or uploaded in any static file or html.\nCreate a new workflow action and commit\nUse the below yaml\n1name: CI 2 3on: 4 push: 5 branches: 6 - blog 7 pull_request: 8 9jobs: 10 deploy: 11 runs-on: ubuntu-20.04 12 steps: 13 - uses: actions/checkout@v3 14 with: 15 submodules: true # Fetch Hugo themes (true OR recursive) 16 fetch-depth: 0 # Fetch all history for .GitInfo and .Lastmod 17 18 - name: Setup Hugo 19 uses: peaceiris/actions-hugo@v2 20 with: 21 hugo-version: \u0026#39;latest\u0026#39; 22 extended: true 23 24 - name: Build 25 run: hugo --minify 26 27 - name: Deploy 28 uses: peaceiris/actions-gh-pages@v3 29 if: github.ref == \u0026#39;refs/heads/blog\u0026#39; 30 with: 31 github_token: ${{ secrets.TOKEN }} 32 publish_dir: ./public By default the actions generate the live site in gh-pages branch, so goto github pages and change the branch to gh-pages and save.\nNow when you commit and push and changes to the blog branch, github actions automatically builds your site and deploys it.\nLegacy Deploy If you don't want use github actions to deploy the site then you can generate the site and publish it manually\nAdd a github submodule for the public folder\n1git submodule add -b gh-pages https://github.com/\u0026lt;GITHUB-USERNAME\u0026gt;/\u0026lt;GITHUB-USERNAME\u0026gt;.github.io.git public Generate the site in the public folder\n1hugo Note! Add public to .gitignore file so that public folder is not committed to the blog repo.\nCommit blog content (Markdown files) to the blog branch, double check to make sure public folder and its files are not part of this commit.\n1cd \u0026lt;GITHUB-USERNAME\u0026gt; 2git status 3git add . 4git commit -am \u0026#34;blog update\u0026#34; 5git push origin blog Commit \u0026amp; push site (HTML files) to the gh-pages branch\n1cd \u0026lt;GITHUB-USERNAME\u0026gt;/public 2git add . 
3git commit -am \u0026#34;Live HTML\u0026#34; 4git push origin gh-pages You should now be seeing the public html files in your .github.io.git repostiory in the gh-pages branch.\nOpen url https://.github.io/ and your blog should be up.\nGoogle Analytics Modify the params.toml and add your google analytics tracking id.\n1ga_analytics = \u0026#34;\u0026lt;YOUR_VALUE\u0026gt;\u0026#34; This will help you track your website traffic\nGoogle Search Indexing Google search will not include your blog.\nTo get your site to show up in google search ensure there is a sitemap.xml.\nhttp://.github.io/sitemap.xml\nLogin to google search console https://search.google.com/search-console and add your blog\nCopy the hmtl file to static folder for site verification\nIt will take an hour for the site to be indexed and show up on search results.\nMarkdown Learn markdown syntax https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet\nReferences https://github.com/chipzoller/hugo-clarity\nhttps://gohugo.io/\nhttps://gohugo.io/hosting-and-deployment/hosting-on-github/\n","link":"https://gitorko.github.io/post/github-blog-with-hugo/","section":"post","tags":["hugo"],"title":"Github Blog with Hugo"},{"body":"","link":"https://gitorko.github.io/tags/hugo/","section":"tags","tags":null,"title":"Hugo"},{"body":"","link":"https://gitorko.github.io/categories/hugo/","section":"categories","tags":null,"title":"Hugo"},{"body":"","link":"https://gitorko.github.io/tags/drools/","section":"tags","tags":null,"title":"Drools"},{"body":"","link":"https://gitorko.github.io/categories/drools/","section":"categories","tags":null,"title":"Drools"},{"body":"Spring boot integration with Drools.\nGithub: https://github.com/gitorko/project63\nDrools Drools is a Business Rule Management System (BRMS). Business \u0026amp; Non-Technical users can write the rules in a format that is easy to understand and plug it into drools engine. These rules/facts are processed to produce results. 
Cost of changing the rules is low.\nCode 1package com.demo.project63; 2 3import java.util.Arrays; 4import java.util.Collections; 5import java.util.HashMap; 6import java.util.Map; 7 8import lombok.extern.slf4j.Slf4j; 9import org.kie.api.KieServices; 10import org.kie.api.builder.KieBuilder; 11import org.kie.api.builder.KieFileSystem; 12import org.kie.api.builder.KieModule; 13import org.kie.api.runtime.KieContainer; 14import org.kie.api.runtime.KieSession; 15import org.kie.internal.io.ResourceFactory; 16import org.springframework.boot.CommandLineRunner; 17import org.springframework.boot.SpringApplication; 18import org.springframework.boot.autoconfigure.SpringBootApplication; 19import org.springframework.context.annotation.Bean; 20 21@SpringBootApplication 22@Slf4j 23public class Main { 24 25 public static void main(String[] args) { 26 SpringApplication.run(Main.class, args); 27 } 28 29 @Bean 30 public CommandLineRunner sendData(KieContainer kContainer) { 31 return args -\u0026gt; { 32 //Simple 33 KieSession kieSession1 = kContainer.newKieSession(); 34 Product p1 = new Product(); 35 p1.setType(\u0026#34;desktop\u0026#34;); 36 p1.setRegions(Collections.emptyMap()); 37 p1.setManufacturers(Collections.emptyList()); 38 kieSession1.insert(p1); 39 kieSession1.fireAllRules(); 40 kieSession1.dispose(); 41 log.info(\u0026#34;Discount on {} is {}\u0026#34;, p1.getType(), p1.getDiscount()); 42 43 //Iterates a Map. 
44 KieSession kieSession2 = kContainer.newKieSession(); 45 Product p2 = new Product(); 46 p2.setType(\u0026#34;laptop\u0026#34;); 47 Map\u0026lt;String, String\u0026gt; r2 = new HashMap\u0026lt;\u0026gt;(); 48 r2.put(\u0026#34;region1\u0026#34;, \u0026#34;A\u0026#34;); 49 r2.put(\u0026#34;region2\u0026#34;, \u0026#34;B\u0026#34;); 50 r2.put(\u0026#34;region3\u0026#34;, \u0026#34;C\u0026#34;); 51 p2.setRegions(r2); 52 p2.setManufacturers(Collections.emptyList()); 53 kieSession2.insert(p2); 54 kieSession2.fireAllRules(); 55 kieSession2.dispose(); 56 log.info(\u0026#34;Discount on {} is {}\u0026#34;, p2.getType(), p2.getDiscount()); 57 58 //Iterates List 59 KieSession kieSession3 = kContainer.newKieSession(); 60 Product p3 = new Product(); 61 p3.setType(\u0026#34;keyboard\u0026#34;); 62 p3.setRegions(Collections.emptyMap()); 63 p3.setManufacturers(Arrays.asList(\u0026#34;Company1\u0026#34;, \u0026#34;Company2\u0026#34;)); 64 kieSession3.insert(p3); 65 kieSession3.fireAllRules(); 66 kieSession3.dispose(); 67 log.info(\u0026#34;Discount on {} is {}\u0026#34;, p3.getType(), p3.getDiscount()); 68 }; 69 } 70 71 @Bean 72 public KieContainer kieContainer() { 73 KieServices kieServices = KieServices.Factory.get(); 74 KieFileSystem kieFileSystem = kieServices.newKieFileSystem(); 75 kieFileSystem.write(ResourceFactory.newClassPathResource(\u0026#34;product-discount.drl\u0026#34;)); 76 KieBuilder kieBuilder = kieServices.newKieBuilder(kieFileSystem); 77 kieBuilder.buildAll(); 78 KieModule kieModule = kieBuilder.getKieModule(); 79 return kieServices.newKieContainer(kieModule.getReleaseId()); 80 } 81 82} 1import com.demo.project63.Product; 2import java.util.Map; 3 4rule \u0026#34;Discount Based on Product\u0026#34; 5\twhen 6\t$product: Product(type == \u0026#34;desktop\u0026#34;) 7\tthen 8\tSystem.out.println(\u0026#34;Discount provided for product\u0026#34;); 9\t$product.setDiscount(15); 10\tend 11 12 13rule \u0026#34;Discount Based on Store A,B,C\u0026#34; 14\twhen 15\t$product: 
Product($regions : regions) 16\t$region: Map() from $regions 17\t$entry: Map.Entry( $key: key, $val: value ) from $region.entrySet() 18\teval($val.equals(\u0026#34;A\u0026#34;) || $val.equals(\u0026#34;B\u0026#34;) || $val.equals(\u0026#34;C\u0026#34;)) 19\tthen 20\tSystem.out.println(\u0026#34;Discount provided for product in specific region\u0026#34;); 21\t$product.setDiscount(10); 22\tend 23 24rule \u0026#34;Discount Based Manufacturer\u0026#34; 25\twhen 26\t$product: Product($manufacturers : manufacturers) 27\t$manufacturer: String() from $manufacturers 28\teval($manufacturer.equals(\u0026#34;Company1\u0026#34;) || $manufacturer.equals(\u0026#34;Company2\u0026#34;)) 29\tthen 30\tSystem.out.println(\u0026#34;Discount provided for manufacturer\u0026#34;); 31\t$product.setDiscount(5); 32\tend Setup 1# Project 63 2 3Spring Boot - Drools 4 5[https://gitorko.github.io/spring-boot-drools/](https://gitorko.github.io/spring-boot-drools/) 6 7### Version 8 9Check version 10 11```bash 12$java --version 13openjdk version \u0026#34;21.0.3\u0026#34; 2024-04-16 LTS 14``` 15 16### Dev 17 18To run code. 19 20```bash 21./gradlew clean build 22./gradlew bootRun 23``` References https://www.drools.org/\n","link":"https://gitorko.github.io/post/spring-drools/","section":"post","tags":["spring","spring-boot","drools"],"title":"Spring - Drools"},{"body":"Java Design Patterns - Creational, Structural \u0026amp; Behavioral design patterns.\nGithub: https://github.com/gitorko/project01\nCreational Design Patterns Provides way to create objects while hiding the creation logic.\n1. Singleton Pattern Singleton pattern ensures that only one instance of the class exists in the java virtual machine.\nA singleton class has these common features\nprivate constructor to restrict creation of instance by other classes. private static variable of the same class. public static method to get instance of class. We will first look at eager loaded singleton. 
This is costly as object is created at time of class loading,also no scope for exception handling if instantiation fails.\n1package com.demo.basics.designpatterns._01_singleton; 2 3import org.junit.jupiter.api.Assertions; 4import org.junit.jupiter.api.Test; 5 6public class EagerLoadedSingleton { 7 8 private static final EagerLoadedSingleton instance = new EagerLoadedSingleton(); 9 10 private EagerLoadedSingleton() { 11 } 12 13 public static EagerLoadedSingleton getInstance() { 14 return instance; 15 } 16 17 @Test 18 public void test() { 19 Assertions.assertEquals(\u0026#34;Hello from EagerLoadedSingleton!\u0026#34;, EagerLoadedSingleton.getInstance().hello()); 20 } 21 22 public String hello() { 23 return (\u0026#34;Hello from EagerLoadedSingleton!\u0026#34;); 24 } 25} This can be modified to static block singleton which provides room for handling exception.\n1package com.demo.basics.designpatterns._01_singleton; 2 3import org.junit.jupiter.api.Assertions; 4import org.junit.jupiter.api.Test; 5 6public class StaticBlockSingleton { 7 8 private static final StaticBlockSingleton instance; 9 10 static { 11 try { 12 instance = new StaticBlockSingleton(); 13 } catch (Exception e) { 14 throw new RuntimeException(\u0026#34;Exception occurred in creating singleton instance\u0026#34;); 15 } 16 } 17 18 private StaticBlockSingleton() { 19 } 20 21 public static StaticBlockSingleton getInstance() { 22 return instance; 23 } 24 25 @Test 26 public void test() { 27 Assertions.assertEquals(\u0026#34;Hello from StaticBlockSingleton!\u0026#34;, StaticBlockSingleton.getInstance().hello()); 28 } 29 30 public String hello() { 31 return (\u0026#34;Hello from StaticBlockSingleton!\u0026#34;); 32 } 33} The next step is to use lazy initialization singleton as creating singleton at class loading time and not using it will be costly.\n1package com.demo.basics.designpatterns._01_singleton; 2 3import org.junit.jupiter.api.Assertions; 4import org.junit.jupiter.api.Test; 5 6public class 
LazyLoadedSingleton { 7 8 private static LazyLoadedSingleton instance; 9 10 private LazyLoadedSingleton() { 11 } 12 13 public static LazyLoadedSingleton getInstance() { 14 if (instance == null) { 15 instance = new LazyLoadedSingleton(); 16 } 17 return instance; 18 } 19 20 @Test 21 public void test() { 22 Assertions.assertEquals(\u0026#34;Hello from LazyLoadedSingleton!\u0026#34;, LazyLoadedSingleton.getInstance().hello()); 23 } 24 25 public String hello() { 26 return (\u0026#34;Hello from LazyLoadedSingleton!\u0026#34;); 27 } 28} However this is not thread safe as in multithread environment 2 threads can get 2 different instances of the object. So lets make this thread safe. Notice we introduced synchronized keyword on the getInstance method.\n1package com.demo.basics.designpatterns._01_singleton; 2 3import org.junit.jupiter.api.Assertions; 4import org.junit.jupiter.api.Test; 5 6public class ThreadSafeSingleton { 7 8 private static ThreadSafeSingleton instance; 9 10 private ThreadSafeSingleton() { 11 } 12 13 public static synchronized ThreadSafeSingleton getInstance() { 14 if (instance == null) { 15 instance = new ThreadSafeSingleton(); 16 } 17 return instance; 18 } 19 20 @Test 21 public void test() { 22 Assertions.assertEquals(\u0026#34;Hello from ThreadSafeSingleton!\u0026#34;, ThreadSafeSingleton.getInstance().hello()); 23 } 24 25 public String hello() { 26 return (\u0026#34;Hello from ThreadSafeSingleton!\u0026#34;); 27 } 28} The above program is thread safe but reduces performance as each thread waits to enter the synchronized block. We now fix that by introducing double check locking. Notice that we removed the synchronized keyword on the getInstance method and moved it inside the method. 
We now perform 2 if checks on the instance.\n1package com.demo.basics.designpatterns._01_singleton; 2 3import org.junit.jupiter.api.Test; 4 5public class ThreadSafeSingletonDoubleCheckLock { 6 7 private static ThreadSafeSingletonDoubleCheckLock instance; 8 9 private ThreadSafeSingletonDoubleCheckLock() { 10 } 11 12 @Test 13 public void test() { 14 System.out.println(ThreadSafeSingletonDoubleCheckLock.getInstance().hello()); 15 } 16 17 public static ThreadSafeSingletonDoubleCheckLock getInstance() { 18 if (instance == null) { 19 synchronized (ThreadSafeSingletonDoubleCheckLock.class) { 20 if (instance == null) { 21 instance = new ThreadSafeSingletonDoubleCheckLock(); 22 } 23 } 24 25 } 26 return instance; 27 } 28 29 public String hello() { 30 return (\u0026#34;Hello from ThreadSafeSingleton!\u0026#34;); 31 } 32} Using reflection all previous singleton implementation can be broken\n1package com.demo.basics.designpatterns._01_singleton; 2 3import java.lang.reflect.Constructor; 4import java.lang.reflect.InvocationTargetException; 5 6import org.junit.jupiter.api.Assertions; 7import org.junit.jupiter.api.Test; 8 9public class BreakSingletonByReflection { 10 11 private static boolean testSingleton() { 12 ThreadSafeSingletonDoubleCheckLock instanceOne = ThreadSafeSingletonDoubleCheckLock.getInstance(); 13 ThreadSafeSingletonDoubleCheckLock instanceTwo = null; 14 try { 15 Constructor[] constructors = ThreadSafeSingletonDoubleCheckLock.class.getDeclaredConstructors(); 16 for (Constructor constructor : constructors) { 17 constructor.setAccessible(true); 18 instanceTwo = (ThreadSafeSingletonDoubleCheckLock) constructor.newInstance(); 19 break; 20 } 21 } catch (InstantiationException | IllegalAccessException | IllegalArgumentException 22 | InvocationTargetException ex) { 23 ex.printStackTrace(); 24 } 25 if (instanceOne.hashCode() != instanceTwo.hashCode()) { 26 System.out.println(\u0026#34;Singleton broken as hashcode differs!\u0026#34;); 27 return false; 28 } 29 return true; 30 
} 31 32 @Test 33 public void test() { 34 Assertions.assertFalse(testSingleton()); 35 } 36 37} To safeguard against reflection we will throw RuntimeException in the constructor. We will introduce the volatile keyword to make it even more thread safe.\nHow volatile works in java? The volatile keyword in Java is used as an indicator to Java compiler and Thread that do not cache value of this variable and always read it from main memory. Java volatile keyword also guarantees visibility and ordering, write to any volatile variable happens before any read into the volatile variable. It also prevents compiler or JVM from the reordering of code.\nIf we do not make the instance variable volatile than the Thread which is creating instance of Singleton is not able to communicate to the other thread, that the instance has been created until it comes out of the Singleton block, so if Thread A is creating Singleton instance and just after creation lost the CPU, all other thread will not be able to see value of instance as not null and they will believe its still null. 
By adding volatile java will not read the variable into thread context local memory and instead read it from the main memory each time.\n1package com.demo.basics.designpatterns._01_singleton; 2 3import org.junit.jupiter.api.Assertions; 4import org.junit.jupiter.api.Test; 5 6public class SingletonDefendReflection { 7 8 private static volatile SingletonDefendReflection instance; 9 10 private SingletonDefendReflection() { 11 if (instance != null) { 12 throw new RuntimeException(\u0026#34;Use get instance to create object!\u0026#34;); 13 } 14 } 15 16 @Test 17 public void test() { 18 Assertions.assertEquals(\u0026#34;Hello from ThreadSafeSingleton!\u0026#34;, SingletonDefendReflection.getInstance().hello()); 19 } 20 21 public static SingletonDefendReflection getInstance() { 22 if (instance == null) { 23 synchronized (SingletonDefendReflection.class) { 24 if (instance == null) { 25 instance = new SingletonDefendReflection(); 26 } 27 } 28 } 29 return instance; 30 } 31 32 public String hello() { 33 return (\u0026#34;Hello from ThreadSafeSingleton!\u0026#34;); 34 } 35} To defend against reflection you can also use Enum based singleton, The disadvantage is you cant do lazy loading, you cant extend the singleton.\n1package com.demo.basics.designpatterns._01_singleton; 2 3import org.junit.jupiter.api.Assertions; 4import org.junit.jupiter.api.Test; 5 6public class EnumSingleton { 7 8 @Test 9 public void test() { 10 Assertions.assertEquals(\u0026#34;Hello from EnumSingleton!\u0026#34;, EnumSingleClass.INSTANCE.hello()); 11 } 12 13 enum EnumSingleClass { 14 INSTANCE; 15 16 public String hello() { 17 return (\u0026#34;Hello from EnumSingleton!\u0026#34;); 18 } 19 } 20} There is another approach of writing a singleton called Bill Pugh Singleton implementation which uses static inner helper class instead of using synchronized keyword.\n1package com.demo.basics.designpatterns._01_singleton; 2 3import org.junit.jupiter.api.Assertions; 4import org.junit.jupiter.api.Test; 5 6public 
class BillPughSingleton { 7 8 private BillPughSingleton() { 9 } 10 11 public static BillPughSingleton getInstance() { 12 return SingletonHelper.INSTANCE; 13 } 14 15 @Test 16 public void test() { 17 Assertions.assertEquals(\u0026#34;Hello from BillPughSingleton!\u0026#34;, BillPughSingleton.getInstance().hello()); 18 } 19 20 public String hello() { 21 return \u0026#34;Hello from BillPughSingleton!\u0026#34;; 22 } 23 24 private static class SingletonHelper { 25 private static final BillPughSingleton INSTANCE = new BillPughSingleton(); 26 } 27} In a distributed systems a singleton needs to be serialized and restored from store later and care must be taken to ensure that new instance is not created and the same instance that was serialized is restored. Notice the method readResolve if this method is removed then the singleton design breaks during de-serialization.\n1package com.demo.basics.designpatterns._01_singleton; 2 3import java.io.FileInputStream; 4import java.io.FileOutputStream; 5import java.io.ObjectInput; 6import java.io.ObjectInputStream; 7import java.io.ObjectOutput; 8import java.io.ObjectOutputStream; 9import java.io.Serializable; 10 11import lombok.SneakyThrows; 12import org.junit.jupiter.api.Assertions; 13import org.junit.jupiter.api.Test; 14 15public class SerializedSingleton implements Serializable { 16 17 private static final long serialVersionUID = -1L; 18 19 private SerializedSingleton() { 20 } 21 22 public static SerializedSingleton getInstance() { 23 return SingletonHelper.instance; 24 } 25 26 @Test 27 public void test() throws Exception { 28 SerializedSingleton instanceOne = SerializedSingleton.getInstance(); 29 serialize(instanceOne); 30 SerializedSingleton instanceTwo = deserialize(); 31 Assertions.assertEquals(instanceOne.hashCode(), instanceTwo.hashCode()); 32 } 33 34 @SneakyThrows 35 public void serialize(SerializedSingleton instanceOne) { 36 ObjectOutput out = new ObjectOutputStream(new FileOutputStream(\u0026#34;filename.ser\u0026#34;)); 
37 out.writeObject(instanceOne); 38 out.close(); 39 } 40 41 @SneakyThrows 42 public SerializedSingleton deserialize() { 43 ObjectInput in = new ObjectInputStream(new FileInputStream(\u0026#34;filename.ser\u0026#34;)); 44 SerializedSingleton instanceTwo = (SerializedSingleton) in.readObject(); 45 in.close(); 46 return instanceTwo; 47 } 48 49 public String hello() { 50 return (\u0026#34;Hello from singleton!\u0026#34;); 51 } 52 53 protected Object readResolve() { 54 return getInstance(); 55 } 56 57 private static class SingletonHelper { 58 private static final SerializedSingleton instance = new SerializedSingleton(); 59 } 60 61} A singleton example within java sdk is the Runtime class for garbage collection.\n1package com.demo.basics.designpatterns._01_singleton; 2 3import org.junit.jupiter.api.Test; 4 5public class RuntimeSingleton { 6 @Test 7 public void test() { 8 Runtime singleton1 = Runtime.getRuntime(); 9 singleton1.gc(); 10 Runtime singleton2 = Runtime.getRuntime(); 11 if (singleton1 == singleton2) { 12 System.out.println(\u0026#34;Singleton!\u0026#34;); 13 } else { 14 System.out.println(\u0026#34;Not Singleton!\u0026#34;); 15 } 16 } 17} Why not use a static class instead of writing a singleton class? Because static class doesnt guarantee thread safety.\nCan i have parameters in a singleton? A singleton constructor cant take parameters that violates the rule of singleton. If there are parameters then it classifies as a factory pattern.\nIf singleton is unique instance per JVM instance how does it work in a tomcat server which can have 2 instances of same web application deployed on it. Since the applications still run on single JVM will they share the singleton? In this case both web applications will get their own instance of singleton because of class loader visibility.Tomcat uses individual class loaders for webapps. However if both application request a JRE or Tomcat singleton eg: Runtime then both get the same singleton.\n2. 
Factory Pattern Factory design pattern is used when we have a super class with multiple sub-classes and based on input, we need to return one of the sub-class. The main method doesnt know the details of instantiating a object its deferred to the factory subclass. Factory calls the new operator.\n1package com.demo.basics.designpatterns._02_factory; 2 3import org.junit.jupiter.api.Assertions; 4import org.junit.jupiter.api.Test; 5 6enum AnimalType { 7 DOG, DUCK, CAT; 8} 9 10interface Animal { 11 String sound(); 12} 13 14public class FactoryPatternTest { 15 16 @Test 17 public void test() { 18 Animal animal = Factory.getAnimal(AnimalType.CAT); 19 Assertions.assertEquals(\u0026#34;Meow!\u0026#34;, animal.sound()); 20 } 21} 22 23class Duck implements Animal { 24 25 @Override 26 public String sound() { 27 return \u0026#34;Quak!\u0026#34;; 28 } 29} 30 31class Dog implements Animal { 32 33 @Override 34 public String sound() { 35 return \u0026#34;Bark!\u0026#34;; 36 } 37} 38 39class Cat implements Animal { 40 41 @Override 42 public String sound() { 43 return \u0026#34;Meow!\u0026#34;; 44 } 45} 46 47class Factory { 48 public static Animal getAnimal(AnimalType type) { 49 switch (type) { 50 case DOG: 51 return new Dog(); 52 case CAT: 53 return new Cat(); 54 case DUCK: 55 return new Duck(); 56 default: 57 return null; 58 } 59 } 60} 3. Abstract Factory Pattern Abstract factory pattern is similar to Factory pattern and it’s factory of factories. 
In factory pattern we used switch statement to decide which object to return in abstract factory we remove the if-else/switch block and have a factory class for each sub-class.\n1package com.demo.basics.designpatterns._03_abstractfactory; 2 3import org.junit.jupiter.api.Assertions; 4import org.junit.jupiter.api.Test; 5 6interface Animal { 7 String sound(); 8} 9 10interface AnimalFactory { 11 Animal createAnimal(); 12} 13 14public class AbstractFactoryPatternTest { 15 16 @Test 17 public void test() { 18 Animal animal = AnimalAbstractFactory.getAnimal(new DogFactory()); 19 Assertions.assertEquals(\u0026#34;Bark!\u0026#34;, animal.sound()); 20 } 21} 22 23class Duck implements Animal { 24 @Override 25 public String sound() { 26 return \u0026#34;Quak!\u0026#34;; 27 } 28} 29 30class Dog implements Animal { 31 @Override 32 public String sound() { 33 return \u0026#34;Bark!\u0026#34;; 34 } 35} 36 37class Cat implements Animal { 38 @Override 39 public String sound() { 40 return \u0026#34;Meow!\u0026#34;; 41 } 42} 43 44class AnimalAbstractFactory { 45 public static Animal getAnimal(AnimalFactory bf) { 46 return bf.createAnimal(); 47 } 48} 49 50class DuckFactory implements AnimalFactory { 51 @Override 52 public Animal createAnimal() { 53 return new Duck(); 54 } 55} 56 57class DogFactory implements AnimalFactory { 58 @Override 59 public Animal createAnimal() { 60 return new Dog(); 61 } 62} 63 64class CatFactory implements AnimalFactory { 65 @Override 66 public Animal createAnimal() { 67 return new Cat(); 68 } 69} 4. Builder Pattern Builder pattern is used to build a complex object with lot of attributes. It becomes difficult to pass the correct type in correct order to a constructor when there are many attributes. If some of the attributes are optional then there is overhead of having to pass null each time to the constructor or having to write multiple constructors(telescoping). 
Notice that in the example below builder pattern returns immutable object hence no setter methods exist. Notice the static inner class you can write an external class as well if you choose not to modify an existing class. Notice the private constructor of the Dog class as the only way to create an instance is via Builder. The name of dog and breed are the only mandatory fields this defines a contract that a dog object atleast needs these 2 attributes.\n1package com.demo.basics.designpatterns._04_builder; 2 3import lombok.Builder; 4import lombok.Getter; 5import lombok.ToString; 6import org.junit.jupiter.api.Assertions; 7import org.junit.jupiter.api.Test; 8 9public class BuilderPatternTest { 10 11 @Test 12 public void test() { 13 Dog dog1 = new Dog.DogBuilder().setName(\u0026#34;Rocky\u0026#34;).setBreed(\u0026#34;German Shepherd\u0026#34;).setColor(\u0026#34;Grey\u0026#34;).setAge(6).setWeight(40.5).build(); 14 Assertions.assertEquals(40.5, dog1.getWeight()); 15 Dog dog2 = new Dog.DogBuilder().setName(\u0026#34;Rocky\u0026#34;).setBreed(\u0026#34;German Shepherd\u0026#34;).build(); 16 Assertions.assertEquals(30.0, dog2.getWeight()); 17 18 Cat cat = Cat.builder().name(\u0026#34;Fluffy\u0026#34;).breed(\u0026#34;Egyptian\u0026#34;).build(); 19 Assertions.assertEquals(10.0, cat.getWeight()); 20 } 21 22} 23 24@Getter 25@ToString 26class Dog { 27 28 private String name; 29 private String breed; 30 private String color; 31 private int age; 32 private double weight; 33 34 private Dog(DogBuilder builder) { 35 this.name = builder.name; 36 this.breed = builder.breed; 37 this.color = builder.color; 38 this.age = builder.age; 39 this.weight = builder.weight; 40 } 41 42 @Getter 43 public static class DogBuilder { 44 45 private String name; 46 private String breed; 47 private String color; 48 private int age; 49 private double weight; 50 51 public DogBuilder() { 52 this.weight = 30.0; 53 } 54 55 public Dog build() { 56 return new Dog(this); 57 } 58 59 public DogBuilder 
setName(String name) { 60 this.name = name; 61 return this; 62 } 63 64 public DogBuilder setBreed(String breed) { 65 this.breed = breed; 66 return this; 67 } 68 69 public DogBuilder setColor(String color) { 70 this.color = color; 71 return this; 72 } 73 74 public DogBuilder setAge(int age) { 75 this.age = age; 76 return this; 77 } 78 79 public DogBuilder setWeight(double weight) { 80 this.weight = weight; 81 return this; 82 } 83 } 84} 85 86@Builder 87@Getter 88@ToString 89class Cat { 90 91 private String name; 92 private String breed; 93 private String color; 94 private int age; 95 @Builder.Default 96 private double weight = 10.0; 97} Using lombok @Builder annotation you can reduce the code further\nAn example in the java SDK is the StringBuilder class.\n5. Prototype Pattern Prototype pattern is used when the object creation is expensive. Instead of creating a new object you can copy the original object using clone and then modify it according to your needs. Prototype design pattern mandates that the object which you are copying should provide the copying feature, it should not be done by any other class. Decision to use shallow or deep copy of the object attributes is a design decision a shallow copy just copies immediate property and deep copy copies all object references as well. Notice we dont use new to create prototype objects after the first instance is created. 
Prototype avoid subclassing.\n1package com.demo.basics.designpatterns._05_prototype; 2 3import java.util.ArrayList; 4import java.util.List; 5 6import lombok.AllArgsConstructor; 7import lombok.Data; 8import org.junit.jupiter.api.Assertions; 9import org.junit.jupiter.api.Test; 10 11public class PrototypePatternTest { 12 13 @Test 14 public void test() throws CloneNotSupportedException { 15 16 Employees empList = new Employees(new ArrayList\u0026lt;\u0026gt;()); 17 empList.seedData(); 18 Employees dataSet1 = (Employees) empList.clone(); 19 Employees dataSet2 = (Employees) empList.clone(); 20 Assertions.assertEquals(dataSet1.getEmpList().size(), dataSet2.getEmpList().size()); 21 22 dataSet2.getEmpList().add(\u0026#34;john\u0026#34;); 23 Assertions.assertNotEquals(dataSet1.getEmpList().size(), dataSet2.getEmpList().size()); 24 } 25 26} 27 28@AllArgsConstructor 29@Data 30class Employees implements Cloneable { 31 32 private List\u0026lt;String\u0026gt; empList; 33 34 public void seedData() { 35 //Invoke a remote call and fetch data and load it to list. The fetch is costly operation. 36 for (int i = 0; i \u0026lt; 100; i++) { 37 empList.add(\u0026#34;employee_\u0026#34; + i); 38 } 39 } 40 41 @Override 42 public Object clone() throws CloneNotSupportedException { 43 List\u0026lt;String\u0026gt; temp = new ArrayList\u0026lt;\u0026gt;(); 44 for (String s : this.empList) { 45 temp.add(s); 46 } 47 return new Employees(temp); 48 } 49} You can also create a registry to stored newly created objects when there are different types of objects and lookup against the registry when you want to clone objects.\nStructural Design Patterns Deal with class and object composition. Provide different ways to create a class structure, using inheritance and composition to create a large object from small objects\n1. Adapter Pattern Adapter pattern is used when two unrelated interfaces need to work together. 
There is a AlienCraft which has different type of fire \u0026amp; scan api that takes additional parameter compared to the human readable ship interface. However by writing the adapter we map the appropriate functions for fire and scan.\n1package com.demo.basics.designpatterns._06_adapter; 2 3import lombok.AllArgsConstructor; 4import org.junit.jupiter.api.Assertions; 5import org.junit.jupiter.api.Test; 6 7interface Ship { 8 String scan(); 9 10 String fire(); 11} 12 13public class AdapterPatternTest { 14 15 @Test 16 public void test() { 17 SpaceShipAdapter shipAdapter = new SpaceShipAdapter(new AlienCraft()); 18 Assertions.assertEquals(\u0026#34;Scanning enemy\u0026#34;, shipAdapter.scan()); 19 Assertions.assertEquals(\u0026#34;Firing weapon\u0026#34;, shipAdapter.fire()); 20 } 21} 22 23class AlienCraft { 24 public String drakarys() { 25 return \u0026#34;Firing weapon\u0026#34;; 26 } 27 28 public String jorarghugon() { 29 return \u0026#34;Scanning enemy\u0026#34;; 30 } 31} 32 33class EnterpriseCraft { 34 public String zapIt() { 35 return \u0026#34;Firing weapon\u0026#34;; 36 } 37 38 public String acquireTarget() { 39 return \u0026#34;Scanning enemy\u0026#34;; 40 } 41} 42 43@AllArgsConstructor 44class SpaceShipAdapter implements Ship { 45 AlienCraft ship; 46 47 @Override 48 public String scan() { 49 return ship.jorarghugon(); 50 } 51 52 @Override 53 public String fire() { 54 return ship.drakarys(); 55 } 56 57} UML Diagram Adapter design pattern.\n2. Composite Pattern Composite pattern is used when we have to represent a part-whole hierarchy.A group of objects should behave in a similar way,tree like structure. 
Here we have a playlist which can contain songs or other playlist and those playlist can have songs of their own.\n1package com.demo.basics.designpatterns._07_composite; 2 3import java.util.ArrayList; 4import java.util.List; 5 6import lombok.AllArgsConstructor; 7import lombok.RequiredArgsConstructor; 8import org.junit.jupiter.api.Test; 9 10/** 11 * When the group of objects should behave as the single object 12 */ 13public class CompositePatternTest { 14 15 @Test 16 public void test() { 17 SongComponent playList1 = new PlayList(\u0026#34;playlist_1\u0026#34;); 18 SongComponent playList2 = new PlayList(\u0026#34;playlist_2\u0026#34;); 19 SongComponent playList3 = new PlayList(\u0026#34;playlist_3\u0026#34;); 20 21 playList1.add(new Song(\u0026#34;Song1\u0026#34;)); 22 playList2.add(new Song(\u0026#34;Song2\u0026#34;)); 23 playList2.add(new Song(\u0026#34;Song3\u0026#34;)); 24 playList3.add(playList1); 25 playList3.add(playList2); 26 playList3.add(new Song(\u0026#34;Song4\u0026#34;)); 27 playList3.displaySongInfo(); 28 } 29} 30 31abstract class SongComponent { 32 33 public void add(SongComponent c) { 34 throw new UnsupportedOperationException(); 35 } 36 37 public String getSong() { 38 throw new UnsupportedOperationException(); 39 } 40 41 public void displaySongInfo() { 42 throw new UnsupportedOperationException(); 43 } 44} 45 46@RequiredArgsConstructor 47class PlayList extends SongComponent { 48 49 final String playListName; 50 List\u0026lt;SongComponent\u0026gt; componentLst = new ArrayList\u0026lt;\u0026gt;(); 51 52 @Override 53 public void add(SongComponent c) { 54 componentLst.add(c); 55 } 56 57 @Override 58 public void displaySongInfo() { 59 System.out.println(\u0026#34;Playlist Name: \u0026#34; + playListName); 60 for (SongComponent s : componentLst) { 61 s.displaySongInfo(); 62 } 63 } 64} 65 66@AllArgsConstructor 67class Song extends SongComponent { 68 String songName; 69 70 @Override 71 public String getSong() { 72 return songName; 73 } 74 75 @Override 76 
public void displaySongInfo() { 77 System.out.println(\u0026#34;Song: \u0026#34; + songName); 78 } 79} 3. Proxy Pattern Proxy pattern is used when we want to provide controlled access of a functionality. A real world example would be when a lawyer restricts the questions police would ask a mob boss. You can add only one proxy per class.\n1package com.demo.basics.designpatterns._08_proxy; 2 3import org.junit.jupiter.api.Test; 4 5interface Command { 6 void runCommand(String cmd); 7} 8 9public class ProxyPatternTest { 10 11 @Test 12 public void test() { 13 Proxy proxy = new Proxy(); 14 proxy.runCommand(\u0026#34;rm\u0026#34;); 15 proxy.runCommand(\u0026#34;dir\u0026#34;); 16 } 17} 18 19class CommandImpl implements Command { 20 21 @Override 22 public void runCommand(String cmd) { 23 System.out.println(\u0026#34;Running : \u0026#34; + cmd); 24 } 25} 26 27class Proxy implements Command { 28 29 Command cmdObj; 30 31 public Proxy() { 32 this.cmdObj = new CommandImpl(); 33 } 34 35 @Override 36 public void runCommand(String cmd) { 37 if (cmd.contains(\u0026#34;rm\u0026#34;)) { 38 System.out.println(\u0026#34;Cant run rm\u0026#34;); 39 } else { 40 cmdObj.runCommand(cmd); 41 } 42 } 43 44} A much more generic way to doing this using default java class InvocationHandler is shown below.\n1package com.demo.basics.designpatterns._08_proxy_invocationhandler; 2 3import java.lang.reflect.InvocationHandler; 4import java.lang.reflect.InvocationTargetException; 5import java.lang.reflect.Method; 6 7import org.junit.jupiter.api.Assertions; 8import org.junit.jupiter.api.Test; 9 10interface Command { 11 void runCommand(String cmd); 12} 13 14public class ProxyHandlerPatternTest { 15 16 @Test 17 public void test() { 18 Command cmd = (Command) CommandProxy.newInstance(new CommandImpl()); 19 cmd.runCommand(\u0026#34;ls\u0026#34;); 20 Assertions.assertThrows(RuntimeException.class, () -\u0026gt; cmd.runCommand(\u0026#34;rm\u0026#34;)); 21 } 22 23} 24 25class CommandImpl implements Command { 26 27 
@Override 28 public void runCommand(String cmd) { 29 System.out.println(\u0026#34;Running : \u0026#34; + cmd); 30 } 31} 32 33class CommandProxy implements InvocationHandler { 34 private Object obj; 35 36 private CommandProxy(Object obj) { 37 this.obj = obj; 38 } 39 40 public static Object newInstance(Object obj) { 41 return java.lang.reflect.Proxy.newProxyInstance(obj.getClass().getClassLoader(), obj.getClass().getInterfaces(), 42 new CommandProxy(obj)); 43 } 44 45 @Override 46 public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { 47 Object result; 48 try { 49 if (args[0].equals(\u0026#34;rm\u0026#34;)) { 50 throw new IllegalAccessException(\u0026#34;rm command not allowed\u0026#34;); 51 } else { 52 result = method.invoke(obj, args); 53 } 54 return result; 55 } catch (InvocationTargetException ex) { 56 throw ex.getTargetException(); 57 } catch (Exception ex) { 58 throw new RuntimeException(\u0026#34;invocation exception \u0026#34; + ex.getMessage()); 59 } 60 } 61 62} 4. Flyweight Pattern Flyweight pattern is used when we need to create a lot of Objects of a class eg 100,000 objects. Reduce cost of storage for large objects by sharing. When we share objects we need to determine what is intrinsic and extrinsic attributes. Here beeType is an intrinsic state and will be shared by all bees. The (x,y) coordinates are the extrinsic properties which will vary for each object. 
Notice that a factory pattern is also seen in the flyweight example below.\n1package com.demo.basics.designpatterns._09_flyweight; 2 3import java.util.HashMap; 4import java.util.Random; 5 6import org.junit.jupiter.api.Assertions; 7import org.junit.jupiter.api.Test; 8 9//divide Object property into intrinsic and extrinsic properties 10enum BeeType { 11 WORKER, ATTACKER; 12 13 public static BeeType getRandom() { 14 return BeeType.values()[new Random().nextInt(2)]; 15 } 16} 17 18interface Bee { 19 void carryOutMission(int x, int y); 20} 21 22public class FlyWeightPatternTest { 23 24 @Test 25 public void test() { 26 for (int i = 0; i \u0026lt; 1000; i++) { 27 int posx = new Random().nextInt(10); 28 int posy = new Random().nextInt(10); 29 FlyweightBeeFactory.getBeeType(BeeType.getRandom()).carryOutMission(posx, posy); 30 } 31 System.out.println(\u0026#34;Total Bee objects created:\u0026#34; + FlyweightBeeFactory.bees.size()); 32 Assertions.assertEquals(2, FlyweightBeeFactory.bees.size()); 33 } 34} 35 36class WorkerBee implements Bee { 37 38 BeeType beeType; 39 40 public WorkerBee(BeeType beeType) { 41 //Takes long time 42 System.out.println(\u0026#34;Creating worker bee!\u0026#34;); 43 this.beeType = beeType; 44 } 45 46 @Override 47 public void carryOutMission(int x, int y) { 48 System.out.println(\u0026#34;Depositing honey at (\u0026#34; + x + \u0026#34;,\u0026#34; + y + \u0026#34;) quadrant!\u0026#34;); 49 } 50 51} 52 53class AttackBee implements Bee { 54 55 BeeType beeType; 56 57 public AttackBee(BeeType beeType) { 58 //Takes long time 59 System.out.println(\u0026#34;Creating attack bee!\u0026#34;); 60 this.beeType = beeType; 61 } 62 63 @Override 64 public void carryOutMission(int x, int y) { 65 System.out.println(\u0026#34;Defending (\u0026#34; + x + \u0026#34;,\u0026#34; + y + \u0026#34;) quadrant!\u0026#34;); 66 } 67 68} 69 70class FlyweightBeeFactory { 71 72 public static final HashMap\u0026lt;BeeType, Bee\u0026gt; bees = new HashMap\u0026lt;\u0026gt;(); 73 74 
public static Bee getBeeType(BeeType beeType) { 75 Bee bee = bees.get(beeType); 76 if (bee == null) { 77 if (beeType.equals(BeeType.WORKER)) { 78 bee = new WorkerBee(beeType); 79 } else { 80 bee = new AttackBee(beeType); 81 } 82 bees.put(beeType, bee); 83 } 84 return bee; 85 } 86 87} Now lets look at how the bad design would have looked, Here we end up creating large number of objects there by wasting memory. In the solution above we have moved out the extrinsic properties from the Bee class so that we can share the objects.\nBad Design Alert!\n1package com.demo.basics.designpatterns._09_flyweight_bad; 2 3import java.util.Random; 4 5import lombok.SneakyThrows; 6import org.junit.jupiter.api.Assertions; 7import org.junit.jupiter.api.Test; 8 9enum BeeType { 10 WORKER, ATTACKER; 11 12 public static BeeType getRandom() { 13 //Returns random bee types. 14 return BeeType.values()[new Random().nextInt(2)]; 15 } 16} 17 18interface Bee { 19 void carryOutMission(int x, int y); 20} 21 22public class WrongFlyWeightPatternTest { 23 24 @Test 25 public void test() { 26 int i = 0; 27 for (; i \u0026lt; 100; i++) { 28 int posx = new Random().nextInt(10); 29 int posy = new Random().nextInt(10); 30 BeeType type = BeeType.getRandom(); 31 if (type.equals(BeeType.WORKER)) { 32 new WorkerBee(BeeType.getRandom()).carryOutMission(posx, posy); 33 } else { 34 new AttackBee(BeeType.getRandom()).carryOutMission(posx, posy); 35 } 36 37 } 38 System.out.println(\u0026#34;Total Bee objects created:\u0026#34; + i); 39 Assertions.assertEquals(100, i); 40 } 41} 42 43class WorkerBee implements Bee { 44 45 BeeType beeType; 46 47 public WorkerBee(BeeType beeType) { 48 //Takes long time 49 System.out.println(\u0026#34;Creating worker bee!\u0026#34;); 50 this.beeType = beeType; 51 } 52 53 @Override 54 public void carryOutMission(int x, int y) { 55 System.out.println(beeType + \u0026#34;, Depositing honey at (\u0026#34; + x + \u0026#34;,\u0026#34; + y + \u0026#34;) quadrant!\u0026#34;); 56 } 57 58} 59 
60class AttackBee implements Bee { 61 62 BeeType beeType; 63 64 @SneakyThrows 65 public AttackBee(BeeType beeType) { 66 //Takes long time 67 System.out.println(\u0026#34;Creating attack bee!\u0026#34;); 68 this.beeType = beeType; 69 } 70 71 @Override 72 public void carryOutMission(int x, int y) { 73 System.out.println(beeType + \u0026#34;, Defending (\u0026#34; + x + \u0026#34;,\u0026#34; + y + \u0026#34;) quadrant!\u0026#34;); 74 } 75 76} 5. Facade Pattern Facade pattern is used to give unified interface to a set of interfaces in a subsystem.\n1package com.demo.basics.designpatterns._10_facade; 2 3import org.junit.jupiter.api.Assertions; 4import org.junit.jupiter.api.Test; 5 6/** 7 * makes the subsystem easier to use 8 */ 9enum DbType { 10 ORACLE, MYSQL; 11} 12 13public class FacadePatternTest { 14 @Test 15 public void test() { 16 Assertions.assertEquals(\u0026#34;mysql report\u0026#34;, HelperFacade.generateReport(DbType.MYSQL)); 17 Assertions.assertEquals(\u0026#34;oracle report\u0026#34;, HelperFacade.generateReport(DbType.ORACLE)); 18 } 19} 20 21class MysqlHelper { 22 23 public String mysqlReport() { 24 return \u0026#34;mysql report\u0026#34;; 25 } 26} 27 28class OracleHelper { 29 30 public String oracleReport() { 31 return \u0026#34;oracle report\u0026#34;; 32 } 33 34} 35 36class HelperFacade { 37 38 public static String generateReport(DbType db) { 39 switch (db) { 40 case ORACLE: 41 OracleHelper ohelper = new OracleHelper(); 42 return ohelper.oracleReport(); 43 case MYSQL: 44 MysqlHelper mhelper = new MysqlHelper(); 45 return mhelper.mysqlReport(); 46 default: 47 return \u0026#34;\u0026#34;; 48 } 49 } 50} 6. Bridge Pattern Bridge Pattern is used to decouple the interfaces from implementation. Prefer Composition over inheritance. There are interface hierarchies in both interfaces as well a implementations.\nBy decoupling the switch \u0026amp; electric device from each other each can vary independently. 
You can add new switches, you can add new electric devices independently without increasing complexity.\n1package com.demo.basics.designpatterns._11_bridge; 2 3import lombok.AllArgsConstructor; 4import org.junit.jupiter.api.Test; 5 6/** 7 * Decouple an abstraction from its implementation so that the two can vary independently 8 */ 9 10interface ElectricDevice { 11 void doSomething(); 12} 13 14public class BridgePatternTest { 15 16 @Test 17 public void test() { 18 Switch switch1 = new PullSwitch(new Light()); 19 switch1.toggle(); 20 System.out.println(); 21 Switch switch2 = new PressSwitch(new Fan()); 22 switch2.toggle(); 23 } 24 25} 26 27class Fan implements ElectricDevice { 28 29 @Override 30 public void doSomething() { 31 System.out.println(\u0026#34;Fan!\u0026#34;); 32 } 33} 34 35class Light implements ElectricDevice { 36 37 @Override 38 public void doSomething() { 39 System.out.println(\u0026#34;Light!\u0026#34;); 40 } 41} 42 43@AllArgsConstructor 44abstract class Switch { 45 46 protected ElectricDevice eDevice; 47 48 public abstract void toggle(); 49} 50 51class PressSwitch extends Switch { 52 53 boolean state; 54 55 public PressSwitch(ElectricDevice d) { 56 super(d); 57 } 58 59 @Override 60 public void toggle() { 61 if (state) { 62 System.out.print(\u0026#34;Pressed Switch, Now turning off :\u0026#34;); 63 eDevice.doSomething(); 64 state = Boolean.FALSE; 65 } else { 66 System.out.print(\u0026#34;Pressed Switch, Now turning on :\u0026#34;); 67 eDevice.doSomething(); 68 state = Boolean.TRUE; 69 } 70 } 71} 72 73class PullSwitch extends Switch { 74 75 boolean state; 76 77 public PullSwitch(ElectricDevice d) { 78 super(d); 79 } 80 81 @Override 82 public void toggle() { 83 if (state) { 84 System.out.print(\u0026#34;Pulled Switch, Now turning off :\u0026#34;); 85 eDevice.doSomething(); 86 state = Boolean.FALSE; 87 } else { 88 System.out.print(\u0026#34;Pulled Switch, Now turning on :\u0026#34;); 89 eDevice.doSomething(); 90 state = Boolean.TRUE; 91 } 92 } 93} UML of 
Bridge Pattern. There is a bridge between Switch class and ElectricDevice class.\nBad Design Alert!\nLets look at how a problematic code looks like and its eligibility for bridge pattern. In the below code trying to add a new Electric Device + Switch combination is a pain which is solved by the bridge pattern mentioned above.\n1package com.demo.basics.designpatterns._11_bridge_bad; 2 3import org.junit.jupiter.api.Test; 4 5public class WrongBridgePatternTest { 6 7 @Test 8 public void test() { 9 PullSwitch switch1 = new PullSwitchFan(); 10 PressSwitch switch2 = new PressSwitchLight(); 11 switch1.toggle(); 12 switch2.toggle(); 13 } 14} 15 16abstract class Switch { 17 abstract public void toggle(); 18} 19 20abstract class PullSwitch extends Switch { 21} 22 23abstract class PressSwitch extends Switch { 24} 25 26class PullSwitchFan extends PullSwitch { 27 28 boolean state; 29 30 @Override 31 public void toggle() { 32 if (state) { 33 System.out.println(\u0026#34;Pulled Switch, Now turning off fan\u0026#34;); 34 state = Boolean.FALSE; 35 } else { 36 System.out.println(\u0026#34;Pulled Switch, Now turning on fan\u0026#34;); 37 state = Boolean.TRUE; 38 } 39 } 40} 41 42class PullSwitchLight extends PullSwitch { 43 44 boolean state; 45 46 @Override 47 public void toggle() { 48 if (state) { 49 System.out.println(\u0026#34;Pulled Switch, Now turning off light\u0026#34;); 50 state = Boolean.FALSE; 51 } else { 52 System.out.println(\u0026#34;Pulled Switch, Now turning on light\u0026#34;); 53 state = Boolean.TRUE; 54 } 55 } 56} 57 58class PressSwitchFan extends PressSwitch { 59 60 boolean state; 61 62 @Override 63 public void toggle() { 64 if (state) { 65 System.out.println(\u0026#34;Pressed Switch, Now turning off fan\u0026#34;); 66 state = Boolean.FALSE; 67 } else { 68 System.out.println(\u0026#34;Pressed Switch, Now turning on fan\u0026#34;); 69 state = Boolean.TRUE; 70 } 71 } 72} 73 74class PressSwitchLight extends PressSwitch { 75 76 boolean state; 77 78 @Override 79 public 
void toggle() { 80 if (state) { 81 System.out.println(\u0026#34;Pressed Switch, Now turning off light\u0026#34;); 82 state = Boolean.FALSE; 83 } else { 84 System.out.println(\u0026#34;Pressed Switch, Now turning on light\u0026#34;); 85 state = Boolean.TRUE; 86 } 87 } 88} UML Diagram of problematic code, you can see that hierarchy exists.\n7. Decorator Pattern Decorator design pattern is used to add the functionality by wrapping another class around the core class without modifying the core class. Disadvantage of decorator pattern is that it uses a lot of similar kind of objects.\n1package com.demo.basics.designpatterns._12_decorator; 2 3import lombok.AllArgsConstructor; 4import org.junit.jupiter.api.Assertions; 5import org.junit.jupiter.api.Test; 6 7interface Pizza { 8 String getDescription(); 9 10 Double getCost(); 11} 12 13public class DecoratorPatternTest { 14 15 @Test 16 public void test() { 17 Pizza doubleCheesePizza = new Cheese(new Cheese(new BasicPizza())); 18 Assertions.assertEquals(14.0, doubleCheesePizza.getCost()); 19 } 20} 21 22class BasicPizza implements Pizza { 23 24 @Override 25 public String getDescription() { 26 return \u0026#34;Basic Pizza\u0026#34;; 27 } 28 29 @Override 30 public Double getCost() { 31 return 10.0; 32 } 33} 34 35@AllArgsConstructor 36class PizzaToppingDecorator implements Pizza { 37 38 Pizza pizza; 39 40 @Override 41 public String getDescription() { 42 return pizza.getDescription(); 43 } 44 45 @Override 46 public Double getCost() { 47 return pizza.getCost(); 48 } 49} 50 51class Cheese extends PizzaToppingDecorator { 52 53 public Cheese(Pizza pizza) { 54 super(pizza); 55 } 56 57 @Override 58 public Double getCost() { 59 return (pizza.getCost() + 2.0); 60 } 61 62 @Override 63 public String getDescription() { 64 return pizza.getDescription() + \u0026#34; + Cheese\u0026#34;; 65 } 66} UML of Decorator Pattern\nBehavioral Design Patterns Behavioral patterns help design classes with better interaction between objects and provide lose 
coupling.\n1. Template Pattern Template Pattern used to create a method stub and deferring some of the steps of implementation to the subclasses. Template method defines the steps to execute an algorithm and it can provide default implementation that might be common for all or some of the subclasses.\n1package com.demo.basics.designpatterns._13_template; 2 3import org.junit.jupiter.api.Test; 4 5public class TemplatePatternTest { 6 7 @Test 8 public void test() { 9 HouseTemplate houseType = new WoodenHouse(); 10 houseType.buildHouse(); 11 System.out.println(); 12 houseType = new GlassHouse(); 13 houseType.buildHouse(); 14 } 15} 16 17class GlassHouse extends HouseTemplate { 18 19 @Override 20 public void buildWalls() { 21 System.out.println(\u0026#34;Building Glass Walls\u0026#34;); 22 } 23 24 @Override 25 public void buildPillars() { 26 System.out.println(\u0026#34;Building Glass Support Beams\u0026#34;); 27 } 28} 29 30class WoodenHouse extends HouseTemplate { 31 32 @Override 33 public void buildWalls() { 34 System.out.println(\u0026#34;Building Wooden Walls\u0026#34;); 35 } 36 37 @Override 38 public void buildPillars() { 39 System.out.println(\u0026#34;Building Wood Pillars\u0026#34;); 40 } 41 42} 43 44abstract class HouseTemplate { 45 46 /** 47 * template method, final so subclasses can\u0026#39;t override 48 */ 49 public final void buildHouse() { 50 buildFoundation(); 51 buildPillars(); 52 buildWalls(); 53 buildWindows(); 54 System.out.println(\u0026#34;House is built.\u0026#34;); 55 } 56 57 /** 58 * default implementation 59 */ 60 private void buildWindows() { 61 System.out.println(\u0026#34;Building Glass Windows\u0026#34;); 62 } 63 64 /** 65 * methods to be implemented by subclasses 66 */ 67 public abstract void buildWalls(); 68 69 public abstract void buildPillars(); 70 71 /** 72 * default implementation 73 */ 74 private void buildFoundation() { 75 System.out.println(\u0026#34;Building foundation with cement,iron \u0026amp; sand\u0026#34;); 76 } 77} 2. 
Mediator Pattern Mediator pattern is used to provide a centralized communication medium between different objects.\n1package com.demo.basics.designpatterns._14_mediator; 2 3import java.util.ArrayList; 4import java.util.List; 5 6import lombok.AllArgsConstructor; 7import org.junit.jupiter.api.Test; 8 9interface ChatMediator { 10 11 void sendMessage(String msg, User user); 12 13 void addUser(User user); 14} 15 16public class MediatorPatternTest { 17 18 @Test 19 public void test() { 20 21 ChatMediator mediator = new ChatMediatorImpl(); 22 User user1 = new User(mediator, \u0026#34;Raj\u0026#34;); 23 User user2 = new User(mediator, \u0026#34;Jacob\u0026#34;); 24 User user3 = new User(mediator, \u0026#34;Henry\u0026#34;); 25 User user4 = new User(mediator, \u0026#34;Stan\u0026#34;); 26 mediator.addUser(user1); 27 mediator.addUser(user2); 28 mediator.addUser(user3); 29 mediator.addUser(user4); 30 user1.send(\u0026#34;Hi All\u0026#34;); 31 32 } 33} 34 35class ChatMediatorImpl implements ChatMediator { 36 37 private List\u0026lt;User\u0026gt; users = new ArrayList\u0026lt;\u0026gt;(); 38 39 @Override 40 public void addUser(User user) { 41 this.users.add(user); 42 } 43 44 @Override 45 public void sendMessage(String msg, User user) { 46 for (User u : this.users) { 47 if (u != user) { 48 u.receive(msg); 49 } 50 } 51 } 52} 53 54@AllArgsConstructor 55class User { 56 57 private ChatMediator mediator; 58 private String name; 59 60 public void send(String msg) { 61 System.out.println(this.name + \u0026#34;: Sending Message=\u0026#34; + msg); 62 mediator.sendMessage(msg, this); 63 } 64 65 public void receive(String msg) { 66 System.out.println(this.name + \u0026#34;: Received Message:\u0026#34; + msg); 67 } 68} 3. 
Chain of Responsibility Pattern Chain of responsibility pattern is used when a request from client is passed to a chain of objects to process them.\n1package com.demo.basics.designpatterns._15_chainofresponsibility; 2 3import org.junit.jupiter.api.Test; 4 5interface DispenseChain { 6 7 void setNextChain(DispenseChain nextChain); 8 9 void dispense(int amount); 10} 11 12public class ChainResponsibilityPatternTest { 13 14 @Test 15 public void test() { 16 ATMDispenseChain atmDispenser = new ATMDispenseChain(); 17 int amount = 530; 18 if (amount % 10 != 0) { 19 System.out.println(\u0026#34;Amount should be in multiple of10s.\u0026#34;); 20 } else { 21 atmDispenser.c1.dispense(amount); 22 } 23 } 24} 25 26class ATMDispenseChain { 27 28 public DispenseChain c1; 29 30 public ATMDispenseChain() { 31 32 DispenseChain c1 = new Dollar50Dispenser(); 33 DispenseChain c2 = new Dollar20Dispenser(); 34 DispenseChain c3 = new Dollar10Dispenser(); 35 36 this.c1 = c1; 37 c1.setNextChain(c2); 38 c2.setNextChain(c3); 39 } 40 41} 42 43 44class Dollar10Dispenser implements DispenseChain { 45 46 private DispenseChain chain; 47 48 @Override 49 public void setNextChain(DispenseChain nextChain) { 50 this.chain = nextChain; 51 } 52 53 @Override 54 public void dispense(int amount) { 55 if (amount \u0026gt;= 10) { 56 int num = amount / 10; 57 int remainder = amount % 10; 58 System.out.println(\u0026#34;Dispensing \u0026#34; + num + \u0026#34; 10$ note\u0026#34;); 59 if (remainder != 0) { 60 this.chain.dispense(remainder); 61 } 62 } else { 63 this.chain.dispense(amount); 64 } 65 } 66} 67 68class Dollar20Dispenser implements DispenseChain { 69 70 private DispenseChain chain; 71 72 @Override 73 public void setNextChain(DispenseChain nextChain) { 74 this.chain = nextChain; 75 } 76 77 @Override 78 public void dispense(int amount) { 79 if (amount \u0026gt;= 20) { 80 int num = amount / 20; 81 int remainder = amount % 20; 82 System.out.println(\u0026#34;Dispensing \u0026#34; + num + \u0026#34; 20$ 
note\u0026#34;); 83 if (remainder != 0) { 84 this.chain.dispense(remainder); 85 } 86 } else { 87 this.chain.dispense(amount); 88 } 89 } 90} 91 92class Dollar50Dispenser implements DispenseChain { 93 94 private DispenseChain chain; 95 96 @Override 97 public void setNextChain(DispenseChain nextChain) { 98 this.chain = nextChain; 99 } 100 101 @Override 102 public void dispense(int amount) { 103 if (amount \u0026gt;= 50) { 104 int num = amount / 50; 105 int remainder = amount % 50; 106 System.out.println(\u0026#34;Dispensing \u0026#34; + num + \u0026#34; 50$ note\u0026#34;); 107 if (remainder != 0) { 108 this.chain.dispense(remainder); 109 } 110 } else { 111 this.chain.dispense(amount); 112 } 113 } 114} 4. Observer Pattern Observer design pattern is used when we want to get notified about state changes of a object. An Observer watches the Subject here and any changes on Subject are notified to the Observer.\n1package com.demo.basics.designpatterns._16_observer; 2 3import java.util.ArrayList; 4import java.util.List; 5 6import org.junit.jupiter.api.Test; 7 8interface Observer { 9 void notify(String tick); 10} 11 12interface Subject { 13 void registerObserver(Observer observer); 14 15 void notifyObservers(String tick); 16} 17 18public class ObserverPatternTest { 19 20 @Test 21 public void test() { 22 Feed feed = new Feed(); 23 feed.registerObserver(new AppleStockObserver()); 24 feed.registerObserver(new GoogleStockObserver()); 25 feed.notifyObservers(\u0026#34;APPL: 162.33\u0026#34;); 26 feed.notifyObservers(\u0026#34;GOOGL: 1031.22\u0026#34;); 27 } 28} 29 30class AppleStockObserver implements Observer { 31 @Override 32 public void notify(String tick) { 33 if (tick != null \u0026amp;\u0026amp; tick.contains(\u0026#34;APPL\u0026#34;)) { 34 System.out.println(\u0026#34;Apple Stock Price: \u0026#34; + tick); 35 } 36 } 37} 38 39class GoogleStockObserver implements Observer { 40 @Override 41 public void notify(String tick) { 42 if (tick != null \u0026amp;\u0026amp; 
tick.contains(\u0026#34;GOOGL\u0026#34;)) { 43 System.out.println(\u0026#34;Google Stock Price: \u0026#34; + tick); 44 } 45 } 46} 47 48class Feed implements Subject { 49 List\u0026lt;Observer\u0026gt; observerLst = new ArrayList\u0026lt;\u0026gt;(); 50 51 @Override 52 public void registerObserver(Observer observer) { 53 observerLst.add(observer); 54 } 55 56 @Override 57 public void notifyObservers(String tick) { 58 observerLst.forEach(e -\u0026gt; e.notify(tick)); 59 } 60} 5. Strategy Pattern Strategy pattern is used when we have multiple algorithm for a specific task and client decides the actual implementation to be used at runtime. This is also known as Policy Pattern.\n1package com.demo.basics.designpatterns._17_stategy; 2 3import org.junit.jupiter.api.Test; 4 5interface PaymentStrategy { 6 void pay(int amount); 7} 8 9public class StrategyPatternTest { 10 11 @Test 12 public void test() { 13 new ShoppingCart().pay(new CreditCardStrategy(), 10); 14 new ShoppingCart().pay(new PaypalStrategy(), 10); 15 } 16} 17 18class CreditCardStrategy implements PaymentStrategy { 19 20 @Override 21 public void pay(int amount) { 22 System.out.println(\u0026#34;Paid by credit card: \u0026#34; + amount); 23 } 24 25} 26 27class PaypalStrategy implements PaymentStrategy { 28 29 @Override 30 public void pay(int amount) { 31 System.out.println(\u0026#34;Paid by paypal: \u0026#34; + amount); 32 } 33 34} 35 36class ShoppingCart { 37 38 public void pay(PaymentStrategy paymentMethod, Integer amount) { 39 paymentMethod.pay(amount); 40 } 41} 1package com.demo.basics.designpatterns._17_strategy_lambda; 2 3import java.util.function.Consumer; 4 5import org.junit.jupiter.api.Test; 6 7public class StrategyLambdaPatternTest { 8 @Test 9 public void test() { 10 ShoppingCart shoppingCart = new ShoppingCart(); 11 12 Consumer\u0026lt;Integer\u0026gt; creditCard = (amount) -\u0026gt; System.out.println(\u0026#34;Paid by credit card: \u0026#34; + amount); 13 Consumer\u0026lt;Integer\u0026gt; payPal = 
(amount) -\u0026gt; System.out.println(\u0026#34;Paid by paypal: \u0026#34; + amount); 14 15 shoppingCart.pay(creditCard, 10); 16 shoppingCart.pay(payPal, 10); 17 } 18 19} 20 21class ShoppingCart { 22 public void pay(Consumer\u0026lt;Integer\u0026gt; payMethod, Integer amount) { 23 payMethod.accept(amount); 24 } 25} 6. Command Pattern Command pattern is used when request is wrapped and passed to invoker which then inturn invokes the encapsulated command. Here Command is our command interface, Stock class is our request. BuyStock and SellStock implementing Order interface which does the actual command processing.\n1package com.demo.basics.designpatterns._18_command; 2 3import java.util.ArrayList; 4import java.util.List; 5 6import lombok.AllArgsConstructor; 7import org.junit.jupiter.api.Test; 8 9interface Command { 10 void execute(); 11} 12 13public class CommandPatternTest { 14 15 @Test 16 public void test() { 17 18 Stock stock1 = new Stock(\u0026#34;GOOGL\u0026#34;, 10); 19 Stock stock2 = new Stock(\u0026#34;IBM\u0026#34;, 20); 20 21 BuyStock buyStockCmd = new BuyStock(stock1); 22 SellStock sellStockCmd = new SellStock(stock2); 23 24 Broker broker = new Broker(); 25 broker.takeOrder(buyStockCmd); 26 broker.takeOrder(sellStockCmd); 27 28 broker.placeOrders(); 29 } 30} 31 32@AllArgsConstructor 33class Stock { 34 35 private String name; 36 private int quantity; 37 38 public void buy() { 39 System.out.println(\u0026#34;Stock [ Name: \u0026#34; + name + \u0026#34;, Quantity: \u0026#34; + quantity + \u0026#34; ] bought\u0026#34;); 40 } 41 42 public void sell() { 43 System.out.println(\u0026#34;Stock [ Name: \u0026#34; + name + \u0026#34;, Quantity: \u0026#34; + quantity + \u0026#34; ] sold\u0026#34;); 44 } 45} 46 47@AllArgsConstructor 48class BuyStock implements Command { 49 private Stock stock; 50 51 public void execute() { 52 stock.buy(); 53 } 54} 55 56@AllArgsConstructor 57class SellStock implements Command { 58 private Stock stock; 59 60 public void execute() { 61 
stock.sell(); 62 } 63} 64 65class Broker { 66 private List\u0026lt;Command\u0026gt; cmdLst = new ArrayList\u0026lt;Command\u0026gt;(); 67 68 public void takeOrder(Command cmd) { 69 cmdLst.add(cmd); 70 } 71 72 public void placeOrders() { 73 for (Command cmd : cmdLst) { 74 cmd.execute(); 75 } 76 cmdLst.clear(); 77 } 78} 7. State Pattern State pattern is used when object changes its behaviour based on internal state. You avoid writing the conditional if-else logic to determine the type of action to be taken based on state of object. Notice that GameContext also implements State along with StartState,StopState classes.\n1package com.demo.basics.designpatterns._19_state; 2 3import lombok.AllArgsConstructor; 4import lombok.Data; 5import lombok.NoArgsConstructor; 6import org.junit.jupiter.api.Test; 7 8interface State { 9 void doAction(); 10} 11 12public class StatePatternTest { 13 @Test 14 public void test() { 15 GameContext game = new GameContext(); 16 17 StartState startState = new StartState(); 18 StopState stopState = new StopState(); 19 20 game.setState(startState); 21 game.doAction(); 22 23 game.setState(stopState); 24 game.doAction(); 25 } 26} 27 28class StartState implements State { 29 30 public void doAction() { 31 System.out.println(\u0026#34;Roll the dice!\u0026#34;); 32 } 33} 34 35class StopState implements State { 36 37 public void doAction() { 38 System.out.println(\u0026#34;Game Over!\u0026#34;); 39 } 40} 41 42@AllArgsConstructor 43@NoArgsConstructor 44@Data 45class GameContext implements State { 46 private State state; 47 48 @Override 49 public void doAction() { 50 this.state.doAction(); 51 } 52} 8. Visitor Pattern Visitor pattern is used to add methods to different types of classes without altering those classes. 
Here we have moved the tax calculation outside each item.\n1package com.demo.basics.designpatterns._20_visitor; 2 3import lombok.AllArgsConstructor; 4import lombok.Data; 5import org.junit.jupiter.api.Assertions; 6import org.junit.jupiter.api.Test; 7 8interface Visitable { 9 double accept(Visitor visitor); 10} 11 12interface Visitor { 13 double visit(Liquor item); 14 15 double visit(Grocery item); 16} 17 18public class VisitorPatternTest { 19 @Test 20 public void test() { 21 22 Visitor taxCalculator = new TaxVisitor(); 23 Liquor liquor = new Liquor(\u0026#34;Vodka\u0026#34;, 12.00d); 24 double liquorPriceAfterTax = liquor.accept(taxCalculator); 25 System.out.println(\u0026#34;Price of liquor: \u0026#34; + liquorPriceAfterTax); 26 Assertions.assertEquals(15.6, liquorPriceAfterTax); 27 28 Grocery grocery = new Grocery(\u0026#34;Potato Chips\u0026#34;, 12.00d); 29 double groceryPriceAfterTax = grocery.accept(taxCalculator); 30 System.out.println(\u0026#34;Price of grocery: \u0026#34; + groceryPriceAfterTax); 31 Assertions.assertEquals(13.2, groceryPriceAfterTax); 32 } 33} 34 35@AllArgsConstructor 36@Data 37class Liquor implements Visitable { 38 String name; 39 double price; 40 41 @Override 42 public double accept(Visitor visitor) { 43 return visitor.visit(this); 44 } 45} 46 47@AllArgsConstructor 48@Data 49class Grocery implements Visitable { 50 String name; 51 double price; 52 53 @Override 54 public double accept(Visitor visitor) { 55 return visitor.visit(this); 56 } 57} 58 59class TaxVisitor implements Visitor { 60 61 @Override 62 public double visit(Liquor item) { 63 return item.price * .30 + item.price; 64 } 65 66 @Override 67 public double visit(Grocery item) { 68 return item.price * .10 + item.price; 69 } 70} 9. 
Interpreter Pattern Interpreter pattern provides a way to evaluate language grammar or expression.\n1package com.demo.basics.designpatterns._21_interpreter; 2 3import lombok.AllArgsConstructor; 4import lombok.Data; 5import org.junit.jupiter.api.Test; 6 7interface Expression { 8 String interpret(InterpreterContext ctx); 9} 10 11public class InterpreterPatternTest { 12 13 @Test 14 public void test() { 15 String input = \u0026#34;30 in binary\u0026#34;; 16 if (input.contains(\u0026#34;binary\u0026#34;)) { 17 int val = Integer.parseInt(input.substring(0, input.indexOf(\u0026#34; \u0026#34;))); 18 System.out.println(new IntToBinaryExpression(val).interpret(new InterpreterContext())); 19 } 20 21 input = \u0026#34;30 in hexadecimal\u0026#34;; 22 if (input.contains(\u0026#34;hexadecimal\u0026#34;)) { 23 int val = Integer.parseInt(input.substring(0, input.indexOf(\u0026#34; \u0026#34;))); 24 System.out.println(new IntToHexExpression(val).interpret(new InterpreterContext())); 25 } 26 } 27 28} 29 30class InterpreterContext { 31 public String getBinaryFormat(int val) { 32 return Integer.toBinaryString(val); 33 } 34 35 public String getHexFormat(int val) { 36 return Integer.toHexString(val); 37 } 38} 39 40@Data 41@AllArgsConstructor 42class IntToBinaryExpression implements Expression { 43 44 int val; 45 46 @Override 47 public String interpret(InterpreterContext ctx) { 48 return ctx.getBinaryFormat(val); 49 } 50} 51 52@Data 53@AllArgsConstructor 54class IntToHexExpression implements Expression { 55 56 int val; 57 58 @Override 59 public String interpret(InterpreterContext ctx) { 60 return ctx.getHexFormat(val); 61 } 62} 10. Iterator Pattern Iterator pattern is used to provide standard way to traverse through group of objects. In the example below we provide 2 types of iterators over the fruit collection, we could have let the user write his own iterator but if there are many clients using the iterator then it would be difficult to maintain. 
Notice that FruitIterator is private and inner class, this hides the implementation details from the client. Logic of iteration is internal to the collection.\n1package com.demo.basics.designpatterns._22_iterator; 2 3import java.util.ArrayList; 4import java.util.Collections; 5import java.util.Comparator; 6import java.util.Iterator; 7import java.util.List; 8 9import lombok.AllArgsConstructor; 10import lombok.Data; 11import org.junit.jupiter.api.Test; 12 13interface FruitCollection { 14 Iterator getIterator(String type); 15} 16 17public class IteratorPatternTest { 18 19 @Test 20 public void test() { 21 22 FruitCollectionImpl collection = new FruitCollectionImpl(); 23 24 for (Iterator iter = collection.getIterator(\u0026#34;COLOR\u0026#34;); iter.hasNext(); ) { 25 Fruit fruit = (Fruit) iter.next(); 26 System.out.println(fruit); 27 } 28 System.out.println(); 29 for (Iterator iter = collection.getIterator(\u0026#34;TYPE\u0026#34;); iter.hasNext(); ) { 30 Fruit fruit = (Fruit) iter.next(); 31 System.out.println(fruit); 32 } 33 } 34} 35 36@AllArgsConstructor 37@Data 38class Fruit { 39 String type; 40 String color; 41} 42 43class FruitCollectionImpl implements FruitCollection { 44 45 List\u0026lt;Fruit\u0026gt; fruits; 46 47 FruitCollectionImpl() { 48 fruits = new ArrayList\u0026lt;\u0026gt;(); 49 fruits.add(new Fruit(\u0026#34;Banana\u0026#34;, \u0026#34;Green\u0026#34;)); 50 fruits.add(new Fruit(\u0026#34;Apple\u0026#34;, \u0026#34;Green\u0026#34;)); 51 fruits.add(new Fruit(\u0026#34;Banana\u0026#34;, \u0026#34;Yellow\u0026#34;)); 52 fruits.add(new Fruit(\u0026#34;Cherry\u0026#34;, \u0026#34;Red\u0026#34;)); 53 fruits.add(new Fruit(\u0026#34;Apple\u0026#34;, \u0026#34;Red\u0026#34;)); 54 } 55 56 @Override 57 public Iterator getIterator(String type) { 58 if (type.equals(\u0026#34;COLOR\u0026#34;)) { 59 return new FruitIterator(\u0026#34;COLOR\u0026#34;); 60 } else { 61 return new FruitIterator(\u0026#34;TYPE\u0026#34;); 62 } 63 } 64 65 private class FruitIterator 
implements Iterator { 66 int index; 67 List\u0026lt;Fruit\u0026gt; sortedFruits = new ArrayList\u0026lt;\u0026gt;(fruits); 68 69 FruitIterator(String iteratorType) { 70 if (iteratorType.equals(\u0026#34;COLOR\u0026#34;)) { 71 Collections.sort(sortedFruits, Comparator.comparing(Fruit::getColor)); 72 } else { 73 Collections.sort(sortedFruits, Comparator.comparing(Fruit::getType)); 74 } 75 } 76 77 @Override 78 public boolean hasNext() { 79 if (index \u0026lt; sortedFruits.size()) { 80 return true; 81 } 82 return false; 83 } 84 85 @Override 86 public Object next() { 87 if (this.hasNext()) { 88 return sortedFruits.get(index++); 89 } 90 return null; 91 } 92 } 93 94} 11. Memento Pattern Memento pattern is used to restore state of an object to a previous state.\nMemento pattern involves three classes.\nOriginator: The core class which holds a state. This state will need to be reverted to previous states. Think of this as your text editor text data. Memento: The class has all the same attributes as Originator class and is used to hold values that will be restored back to the Originator class. Think of this as a temporary variable. Each time you click on save a memento is created and added to the list so that it can be reverted later. CareTaker - This class takes ownership of creating and restoring memento. In the example below you can create a Originator object and change its state many times, only when you call the CareTaker.save method a memento gets created so that an undo operation later on can revert to that state. The list mementoList is private so only caretaker has access to the memento objects ensuring integrity of data. 
Take special care if the attribute is immutable in the undoState method.\n1package com.demo.basics.designpatterns._23_memento; 2 3import java.util.ArrayList; 4import java.util.List; 5 6import lombok.AllArgsConstructor; 7import lombok.Data; 8import lombok.NoArgsConstructor; 9import lombok.RequiredArgsConstructor; 10import org.junit.jupiter.api.Test; 11 12public class MementoPatternTest { 13 14 @Test 15 public void test() { 16 17 Originator originator = new Originator(); 18 CareTaker careTaker = new CareTaker(originator); 19 careTaker.save(); 20 21 originator.setState(\u0026#34;State #1\u0026#34;); 22 originator.setState(\u0026#34;State #2\u0026#34;); 23 careTaker.save(); 24 25 originator.setState(\u0026#34;State #3\u0026#34;); 26 careTaker.save(); 27 28 originator.setState(\u0026#34;State #4\u0026#34;); 29 System.out.println(\u0026#34;Current State: \u0026#34; + originator.getState()); 30 31 careTaker.undo(); 32 System.out.println(\u0026#34;Current State: \u0026#34; + originator.getState()); 33 34 careTaker.undo(); 35 System.out.println(\u0026#34;Current State: \u0026#34; + originator.getState()); 36 37 careTaker.undo(); 38 careTaker.undo(); 39 careTaker.undo(); 40 System.out.println(\u0026#34;Current State: \u0026#34; + originator.getState()); 41 } 42} 43 44@Data 45@AllArgsConstructor 46class Memento { 47 private String state; 48} 49 50@Data 51@AllArgsConstructor 52@NoArgsConstructor 53class Originator { 54 private String state; 55 56 public Memento saveState() { 57 return new Memento(this.state); 58 } 59 60 public void undoState(Memento memento) { 61 this.state = memento.getState(); 62 } 63 64} 65 66@RequiredArgsConstructor 67class CareTaker { 68 final Originator origin; 69 private List\u0026lt;Memento\u0026gt; mementoList = new ArrayList\u0026lt;Memento\u0026gt;(); 70 71 public void save() { 72 if (origin.getState() != null) { 73 mementoList.add(origin.saveState()); 74 } 75 } 76 77 public void undo() { 78 if (!mementoList.isEmpty()) { 79 
origin.undoState(mementoList.get(mementoList.size() - 1)); 80 mementoList.remove(mementoList.size() - 1); 81 } 82 } 83} Differences 1. Difference between bridge pattern and adapter pattern Bridge pattern is built upfront; you break things at design time to make changes so that functionality can be added without tight coupling, adapter pattern works after code is already designed, like legacy code.\n2. Difference between mediator pattern and observer pattern In observer, many objects are interested in the state change of one object. They are not interested in each other. So the relation is one to many. In mediator, many objects are interested in communicating with many other objects. Here the relation is many to many.\n3. Difference between chain of responsibility and command pattern In chain of responsibility pattern, the request is passed to potential receivers, whereas the command pattern uses a command object that encapsulates a request.\n4. Difference between adapter pattern and decorator pattern Adapter pattern only adapts functionality, decorator adds more functionality.\n5. Difference between adapter pattern and facade pattern Adapter pattern just links two incompatible interfaces. A facade is used when one wants an easier or simpler interface to work with.\n","link":"https://gitorko.github.io/post/design-patterns/","section":"post","tags":["design-pattern"],"title":"Design Patterns"},{"body":"","link":"https://gitorko.github.io/tags/xenon/","section":"tags","tags":null,"title":"Xenon"},{"body":"Simple rest service using VMWare Xenon Framework. We will deploy a multi-node instance of the rest service and explore the xenon UI.\nXenon Xenon is a framework for writing small REST-based services. When you write a rest service in other frameworks like spring you have to pick a data store like mongo db to persist your data, you have to maintain separate instances of mongo db to ensure replication works. 
You have to then deploy your rest service in a distributed environment using docker swarm and ensure high availability of your service. Xenon framework does all this for you with just a single library. Here we will see how to create a simple rest service using xenon and then see how we can achieve asynchronous calls, distributed node deployment, replication, synchronization, ordering, and consistency of data across those nodes. We will see how we can scale the application etc.\nCreate a simple maven project. If you are using vscode ctrl+shift+p 'Maven: Generate from maven Archetype' and select a folder, click on 'maven-archetype-quickstart' and enter the project details. You can also run the command and enter 'groupId': com.demo.xenon \u0026amp; 'artifactId': myxenon\n1$ mvn archetype:generate -DarchetypeArtifactId=\u0026#34;maven-archetype-quickstart\u0026#34; -DarchetypeGroupId=\u0026#34;org.apache.maven.archetypes\u0026#34; Add the xenon dependency to your pom.xml \u0026amp; update the plugins. 
The 'xenon-common' is the only core library you need, but to visualize things we have added 'xenon-ui'\n1\u0026lt;dependencies\u0026gt; 2 \u0026lt;dependency\u0026gt; 3 \u0026lt;groupId\u0026gt;com.vmware.xenon\u0026lt;/groupId\u0026gt; 4 \u0026lt;artifactId\u0026gt;xenon-common\u0026lt;/artifactId\u0026gt; 5 \u0026lt;version\u0026gt;1.4.0\u0026lt;/version\u0026gt; 6 \u0026lt;/dependency\u0026gt; 7 \u0026lt;dependency\u0026gt; 8 \u0026lt;groupId\u0026gt;com.vmware.xenon\u0026lt;/groupId\u0026gt; 9 \u0026lt;artifactId\u0026gt;xenon-ui\u0026lt;/artifactId\u0026gt; 10 \u0026lt;version\u0026gt;1.4.0\u0026lt;/version\u0026gt; 11 \u0026lt;/dependency\u0026gt; 12\u0026lt;/dependencies\u0026gt; 13 14\u0026lt;build\u0026gt; 15 \u0026lt;plugins\u0026gt; 16 \u0026lt;plugin\u0026gt; 17 \u0026lt;groupId\u0026gt;org.apache.maven.plugins\u0026lt;/groupId\u0026gt; 18 \u0026lt;artifactId\u0026gt;maven-compiler-plugin\u0026lt;/artifactId\u0026gt; 19 \u0026lt;version\u0026gt;3.7.0\u0026lt;/version\u0026gt; 20 \u0026lt;configuration\u0026gt; 21 \u0026lt;source\u0026gt;1.8\u0026lt;/source\u0026gt; 22 \u0026lt;target\u0026gt;1.8\u0026lt;/target\u0026gt; 23 \u0026lt;/configuration\u0026gt; 24 \u0026lt;/plugin\u0026gt; 25 \u0026lt;plugin\u0026gt; 26 \u0026lt;groupId\u0026gt;org.apache.maven.plugins\u0026lt;/groupId\u0026gt; 27 \u0026lt;artifactId\u0026gt;maven-assembly-plugin\u0026lt;/artifactId\u0026gt; 28 \u0026lt;version\u0026gt;2.4.1\u0026lt;/version\u0026gt; 29 \u0026lt;configuration\u0026gt; 30 \u0026lt;descriptorRefs\u0026gt; 31 \u0026lt;descriptorRef\u0026gt;jar-with-dependencies\u0026lt;/descriptorRef\u0026gt; 32 \u0026lt;/descriptorRefs\u0026gt; 33 \u0026lt;archive\u0026gt; 34 \u0026lt;manifest\u0026gt; 35 \u0026lt;mainClass\u0026gt;com.demo.xenon.App\u0026lt;/mainClass\u0026gt; 36 \u0026lt;/manifest\u0026gt; 37 \u0026lt;/archive\u0026gt; 38 \u0026lt;/configuration\u0026gt; 39 \u0026lt;executions\u0026gt; 40 \u0026lt;execution\u0026gt; 41 
\u0026lt;id\u0026gt;make-assembly\u0026lt;/id\u0026gt; 42 \u0026lt;phase\u0026gt;package\u0026lt;/phase\u0026gt; 43 \u0026lt;goals\u0026gt; 44 \u0026lt;goal\u0026gt;single\u0026lt;/goal\u0026gt; 45 \u0026lt;/goals\u0026gt; 46 \u0026lt;/execution\u0026gt; 47 \u0026lt;/executions\u0026gt; 48 \u0026lt;/plugin\u0026gt; 49 \u0026lt;plugin\u0026gt; 50 \u0026lt;groupId\u0026gt;org.codehaus.mojo\u0026lt;/groupId\u0026gt; 51 \u0026lt;artifactId\u0026gt;exec-maven-plugin\u0026lt;/artifactId\u0026gt; 52 \u0026lt;version\u0026gt;1.6.0\u0026lt;/version\u0026gt; 53 \u0026lt;configuration\u0026gt; 54 \u0026lt;mainClass\u0026gt;com.demo.xenon.App\u0026lt;/mainClass\u0026gt; 55 \u0026lt;/configuration\u0026gt; 56 \u0026lt;/plugin\u0026gt; 57 \u0026lt;/plugins\u0026gt; 58\u0026lt;/build\u0026gt; Modify your App.java, Your App class extends the ServiceHost class. If you want your nodes to have a specific name modify 'defaultArgs.id'. Note the 'defaultArgs.sandbox' path, this is where the data files of the data store will reside. 
On windows it will be 'C:/tmp/xenondb'.\n1package com.demo.xenon; 2 3import java.nio.file.Paths; 4import java.util.UUID; 5 6import com.vmware.xenon.common.ServiceHost; 7import com.vmware.xenon.services.common.RootNamespaceService; 8import com.vmware.xenon.ui.UiService; 9 10public class App extends ServiceHost { 11 public static void main(String[] args) throws Throwable { 12 App appHost = new App(); 13 Arguments defaultArgs = new Arguments(); 14 defaultArgs.id = \u0026#34;host:\u0026#34; + UUID.randomUUID(); 15 defaultArgs.sandbox = Paths.get(\u0026#34;/tmp/xenondb\u0026#34;); 16 appHost.initialize(args, defaultArgs); 17 appHost.start(); 18 Runtime.getRuntime().addShutdownHook(new Thread(appHost::stop)); 19 } 20 21 @Override 22 public ServiceHost start() throws Throwable { 23 super.start(); 24 startDefaultCoreServicesSynchronously(); 25 super.startService(new RootNamespaceService()); 26 super.startService(new UiService()); 27 return this; 28 } 29} Build the project and execute the code. Note: Do not try the approach mvn exec:exec as this will detach the java process on ctrl+c and you will have to kill the java process manually.\n1$ mvn clean install 2$ mvn exec:java Your xenon server should now be up. You should view http://localhost:8000/core/ui/default\nYou just hosted the xenon server. You have not yet written a rest service. At this point you already get 13 core services which are transactions,resource-groups,roles,local-query-tasks,credentials,sync-tasks,graph-queries,users,user-groups,node-groups,tenants,processes,query-tasks. The custom services card at the bottom shows a count of 0. 
Now lets write our first xenon rest service.\nCreate a new class BookStoreService.java\n1package com.demo.xenon; 2 3import com.vmware.xenon.common.Operation; 4import com.vmware.xenon.common.ServiceDocument; 5import com.vmware.xenon.common.ServiceDocumentDescription.PropertyUsageOption; 6import com.vmware.xenon.common.StatefulService; 7import com.vmware.xenon.common.Utils; 8 9public class BookStoreService extends StatefulService { 10 11 public static final String FACTORY_LINK = \u0026#34;/myservice/books\u0026#34;; 12 13 public static class Book extends ServiceDocument { 14 15 @UsageOption(option = PropertyUsageOption.AUTO_MERGE_IF_NOT_NULL) 16 @UsageOption(option = PropertyUsageOption.REQUIRED) 17 public String bookName; 18 19 @UsageOption(option = PropertyUsageOption.AUTO_MERGE_IF_NOT_NULL) 20 @UsageOption(option = PropertyUsageOption.REQUIRED) 21 public Double bookPrice; 22 23 } 24 25 public BookStoreService() { 26 super(Book.class); 27 toggleOption(ServiceOption.PERSISTENCE, true); 28 toggleOption(ServiceOption.REPLICATION, true); 29 toggleOption(ServiceOption.INSTRUMENTATION, true); 30 toggleOption(ServiceOption.OWNER_SELECTION, true); 31 } 32 33 @Override 34 public void handleCreate(Operation startPost) { 35 Book book = getBody(startPost); 36 Utils.validateState(getStateDescription(), book); 37 startPost.complete(); 38 } 39 40 @Override 41 public void handlePut(Operation put) { 42 Book book = getBody(put); 43 Utils.validateState(getStateDescription(), book); 44 setState(put, book); 45 put.complete(); 46 } 47 48 @Override 49 public void handlePatch(Operation patch) { 50 Book bookState = getState(patch); 51 Book book = getBody(patch); 52 Utils.mergeWithState(getStateDescription(), bookState, book); 53 patch.setBody(bookState); 54 patch.complete(); 55 } 56} Add this line to the App.java start method \u0026amp; Run the program\n1super.startFactory(new BookStoreService()); 1$ mvn exec:java You can now create a book.\n1curl -X POST -H \u0026#39;Content-Type: 
application/json\u0026#39; -i http://localhost:8000/myservice/books --data \u0026#39;{ 2bookName: \u0026#34;book1\u0026#34;, 3bookPrice: 2.0 4}\u0026#39; 5 6curl -X GET -H \u0026#39;Content-Type: application/json\u0026#39; -i http://localhost:8000/myservice/books 7 8curl -X GET -H \u0026#39;Content-Type: application/json\u0026#39; -i http://localhost:8000/myservice/books/b5533c1dd2d595c557181891d2dc0 Response:\n1{ 2 \u0026#34;documentLinks\u0026#34;: [ 3 \u0026#34;/myservice/books/b5533c1dd2d595c557181891d2dc0\u0026#34; 4 ], 5 \u0026#34;documentCount\u0026#34;: 1, 6 \u0026#34;queryTimeMicros\u0026#34;: 17999, 7 \u0026#34;documentVersion\u0026#34;: 0, 8 \u0026#34;documentUpdateTimeMicros\u0026#34;: 0, 9 \u0026#34;documentExpirationTimeMicros\u0026#34;: 0, 10 \u0026#34;documentOwner\u0026#34;: \u0026#34;host:e413831a-3247-4dfb-aecf-24d319591a84\u0026#34; 11} 1{ 2 \u0026#34;bookName\u0026#34;: \u0026#34;book1\u0026#34;, 3 \u0026#34;bookPrice\u0026#34;: 2.0, 4 \u0026#34;documentVersion\u0026#34;: 0, 5 \u0026#34;documentEpoch\u0026#34;: 0, 6 \u0026#34;documentKind\u0026#34;: \u0026#34;com:demo:xenon:BookStoreService:Book\u0026#34;, 7 \u0026#34;documentSelfLink\u0026#34;: \u0026#34;/myservice/books/b5533c1dd2d595c557181891d2dc0\u0026#34;, 8 \u0026#34;documentUpdateTimeMicros\u0026#34;: 1532181069064001, 9 \u0026#34;documentUpdateAction\u0026#34;: \u0026#34;POST\u0026#34;, 10 \u0026#34;documentExpirationTimeMicros\u0026#34;: 0, 11 \u0026#34;documentOwner\u0026#34;: \u0026#34;host:e413831a-3247-4dfb-aecf-24d319591a84\u0026#34; 12} Owner Selection Now lets assume you have 1 million book entries, will the data be replicated across all nodes? The data replication will be shared among the nodes that have ServiceOption.OWNER_SELECTION enabled. By enabling this you are telling the service that this node will take ownership of storing the data. 
So if there are 3 nodes in the pool then each node will store 1/3 of the 1 million records in the data store, if one of the nodes goes down then rebalancing happens and now the 2 nodes each have 1/2 of the 1 million records. When your request hits a server which doesn't have that data stored locally the request then gets forwarded to the node which is the owner of that data. You can have a few nodes within the pool with ServiceOption.OWNER_SELECTION disabled; they will internally forward the requests to the OWNER nodes.\nMultinode Multi node capability in xenon provides high availability \u0026amp; scalability in terms of storage \u0026amp; request processing.\nStateful Service vs Stateless Service What we wrote above is a stateful rest service. A stateful service involves data that needs persistence. An example of a stateless rest service would be a proxy service.\nReplication factor \u0026amp; Quorum The replication factor tells xenon how many nodes the stateful service needs to be replicated over. Default is all. Quorum tells xenon on how many nodes the persist operation should be successful before considering something as persisted. If you have defined a quorum of 3 nodes and 1 node fails then all future write requests will fail as there aren't enough members to validate the quorum. Default is majority quorum [n/2+1] where n is replication factor. On a 3 node with replication factor all, quorum is [3/2+1] = 2 which means 2 nodes have to agree for a write to be committed.\nServiceOption Description PERSISTENCE persists data REPLICATION replicates data across nodes INSTRUMENTATION provides stats about service OWNER_SELECTION takes ownership of storing data PropertyUsageOption Annotation Description ID id field AUTO_MERGE_IF_NOT_NULL helper method will merge current state with state supplied in body in case of updates OPTIONAL optional field REQUIRED mandatory field SERVICE_USE used internally Now let us deploy our service in a distributed environment. 
Delete the test folder as we won't be covering it here. Run the command\n1$ mvn clean install 2 3$ cd target; 4 5$ java -jar myxenon-1.0-SNAPSHOT-jar-with-dependencies.jar This should start the single node instance of your book store rest service.\nNow let's add 2 more nodes to the quorum. All the 3 nodes (\u0026quot;Service Host\u0026quot;) will form what is called a node group. Replication happens within this node group.\n1$ java -jar myxenon-1.0-SNAPSHOT-jar-with-dependencies.jar --port=8001 --peerNodes=http://localhost:8000 2$ java -jar myxenon-1.0-SNAPSHOT-jar-with-dependencies.jar --port=8002 --peerNodes=http://localhost:8000 It will take a few seconds for the nodes to synchronize/converge, the nodes use gossip to detect changes and identify new members in the group. Note: The node name should be unique in the quorum. There must be a minimum of 3 nodes. You should be able to see all 3 nodes in the UI. You can try shutting down the primary node and see if data is still accessible. A node group identifies which node is the owner of the data and forwards the request to the owner to retrieve it.\nYou can also add a node to a group after it's started, you can do this by invoking a rest call (JoinPeerRequest). For now we will use the option of adding a node to the node group at startup time by providing --peerNodes. Such an ability to join a node group will be very useful in IoT based devices.\n1curl -X GET -H \u0026#39;Content-Type: application/json\u0026#39; -i http://localhost:8001/myservice/books/e948dec6a69bdd3f57182b45a6740 Clicking on the http://localhost:8000/core/ui/default/#/main/service/id__myservice_books should show more details on the record. 
You can edit/delete records from the UI as well.\nYou can also query for your data in the query tab.\nReferences Xenon\nXenon Github\n","link":"https://gitorko.github.io/post/xenon-rest-service-framework/","section":"post","tags":["xenon"],"title":"Xenon Rest Service Framework"},{"body":"References to all projects under https://github.com/gitorko/\nReferences Project Link Status coding https://github.com/gitorko/coding Base Project for Algorithm Coding Interview project01 https://github.com/gitorko/project01 Data Structure \u0026amp; Algorithms project02 https://github.com/gitorko/project02 Spring - Rsocket project03 https://github.com/gitorko/project03 Kotlin Spring Boot Rest project04 https://github.com/gitorko/project04 Distributed Locking - Apache Ignite project05 https://github.com/gitorko/project05 Distributed Locking - Postgres project06 https://github.com/gitorko/project06 Encode \u0026amp; Decode Watermarked message project07 https://github.com/gitorko/project07 HandleBars Template project08 https://github.com/gitorko/project08 Apache Spark project09 https://github.com/gitorko/project09 Spring AI \u0026amp; Ollama project10 https://github.com/gitorko/project10 Arduino Projects project11 https://github.com/gitorko/project11 Distributed Periodic Task Scheduler project12 https://github.com/gitorko/project12 NestJs + NextJs + Postgresql project14 https://github.com/gitorko/project14 Spring Boot + Bazel project15 https://github.com/gitorko/project15 ExpressJs + Expo + Postgres (Mobile Application) project15 https://github.com/gitorko/project16 Next.js (Frontend + Backend) project57 https://github.com/gitorko/project57 Distributed System Essentials project58 https://github.com/gitorko/project58 Spring Virtual Threads, JUnit5 Test, JaCoCo, SpotBugs, Checkstyle project59 https://github.com/gitorko/project59 Spring Boot JobRunr project60 https://github.com/gitorko/project60 Spring WebFlux \u0026amp; Angular, Reactive MongoDB, Clarity, Docker project61 
https://github.com/gitorko/project61 Kubernetes Samples project63 https://github.com/gitorko/project63 Spring Boot - Drools project64 https://github.com/gitorko/project64 Spring WebFlux Reactive JDBC project65 https://github.com/gitorko/project65 Spring Webflux \u0026amp; R2DBC project66 https://github.com/gitorko/project66 Spring Data JPA N+1 project67 https://github.com/gitorko/project67 Spring Batch - Multi Stage Job Orchestration project68 https://github.com/gitorko/project68 Spring Boot Micrometer - Prometheus, Wavefront project69 https://github.com/gitorko/project69 HTML reports with freemarker project70 https://github.com/gitorko/project70 Jasper Report with Spring project71 https://github.com/gitorko/project71 Spring Observability project72 https://github.com/gitorko/project72 Spring Cloud Sleuth \u0026amp; Zipkin project73 https://github.com/gitorko/project73 Spring Events project74 https://github.com/gitorko/project74 RabbitMQ Stream project75 https://github.com/gitorko/project75 Spring Boot - Querydsl project76 https://github.com/gitorko/project76 Spring Boot - Vault \u0026amp; Property Refresh project77 https://github.com/gitorko/project77 Java \u0026amp; Spring based State Machine project78 https://github.com/gitorko/project78 Spring \u0026amp; RabbitMQ project79 https://github.com/gitorko/project79 Spring Boot MVC Web project Thymeleaf, Login, Charts project80 https://github.com/gitorko/project80 Spring Boot \u0026amp; Kafka project81 https://github.com/gitorko/project81 Message Queue - Postgres project82 https://github.com/gitorko/project82 Spring Data JPA Essentials project83 https://github.com/gitorko/project83 Spring Reactor, Functional Programming, Completable Future Basics project84 https://github.com/gitorko/project84 Jenkins Pipeline + Data processing project85 https://github.com/gitorko/project85 Model Mapper project86 https://github.com/gitorko/project86 Clarity - Server Driven Data Grid with QueryDSL project87 
https://github.com/gitorko/project87 Ticket Booking Application with QR code tickets project88 https://github.com/gitorko/project88 SpringBoot Web, JWT, Angular, Clarity, Authentication, Authorization, Postgres, Charts project89 https://github.com/gitorko/project89 SpringBoot Web + JWT + React.js + Bootstrap + Postgres + Google Charts project90 https://github.com/gitorko/project90 Flash Sale + RabbitMQ + Postgres + Jmeter project91 https://github.com/gitorko/project91 Spring Boot - Apache Ignite project92 https://github.com/gitorko/project92 Chat Server project93 https://github.com/gitorko/project93 Apache Superset + Employee DB project94 https://github.com/gitorko/project94 Voting System project95 https://github.com/gitorko/project95 Traefik Rate Limit project96 https://github.com/gitorko/project96 Spring Boot \u0026amp; GraphQL project97 https://github.com/gitorko/project97 Spring Integration project98 https://github.com/gitorko/project98 Spring Boot \u0026amp; Ehcache project99 https://github.com/gitorko/project99 Spring Boot Postgres - CQRS (Multiple Database) project100 https://github.com/gitorko/project100 Stock Exchange - Price Time Priority Algorithm project101 https://github.com/gitorko/project101 Spring Boot \u0026amp; Postgres - Multi-tenancy \u0026amp; Routing project102 https://github.com/gitorko/project102 deeplearning4j - Supervised classification (Neural Networks) project103 https://github.com/gitorko/project103 Spring Boot \u0026amp; Postgres - Text Search ","link":"https://gitorko.github.io/post/all-projects/all-projects/","section":"post","tags":["projects"],"title":"All Project References"},{"body":"","link":"https://gitorko.github.io/tags/projects/","section":"tags","tags":null,"title":"Projects"}]