diff --git "a/01jvm/week1/JVM\345\220\257\345\212\250\345\217\202\346\225\260.png" "b/01jvm/week1/JVM\345\220\257\345\212\250\345\217\202\346\225\260.png"
new file mode 100644
index 00000000..89647ea2
Binary files /dev/null and "b/01jvm/week1/JVM\345\220\257\345\212\250\345\217\202\346\225\260.png" differ
diff --git a/01jvm/week1/homework/pom.xml b/01jvm/week1/homework/pom.xml
new file mode 100644
index 00000000..25b03cb7
--- /dev/null
+++ b/01jvm/week1/homework/pom.xml
@@ -0,0 +1,12 @@
+
+
+ 4.0.0
+
+ com.geek.eddy
+ homework
+ 1.0-SNAPSHOT
+
+
+
\ No newline at end of file
diff --git a/01jvm/week1/homework/src/main/java/CustomizeClassLoader.java b/01jvm/week1/homework/src/main/java/CustomizeClassLoader.java
new file mode 100644
index 00000000..811e62bc
--- /dev/null
+++ b/01jvm/week1/homework/src/main/java/CustomizeClassLoader.java
@@ -0,0 +1,48 @@
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+public class CustomizeClassLoader extends ClassLoader {
+
+ public static void main(String[] args) {
+ try {
+ Class helloClass = new CustomizeClassLoader().findClass("Hello");
+ Method method = helloClass.getDeclaredMethod("hello");
+ method.invoke(helloClass.newInstance());
+ } catch (NoSuchMethodException e) {
+ e.printStackTrace();
+ } catch (IllegalAccessException e) {
+ e.printStackTrace();
+ } catch (InstantiationException e) {
+ e.printStackTrace();
+ } catch (InvocationTargetException e) {
+ e.printStackTrace();
+ }
+ }
+ @Override
+ protected Class> findClass(String name) {
+ InputStream inputStream = CustomizeClassLoader.class.getResourceAsStream("/Hello.xlass");
+ byte[] bytes = new byte[0];
+ try {
+ bytes = inputStream2byte(inputStream);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ for (int i = 0, bytesLength = bytes.length; i < bytesLength; i++) {
+ bytes[i] = (byte)(255 - bytes[i]);
+ }
+ return defineClass(name, bytes, 0, bytes.length);
+ }
+ private static byte[] inputStream2byte(InputStream inputStream) throws IOException {
+ ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+ byte[] buff = new byte[100];
+ int rc = 0;
+ while ((rc = inputStream.read(buff, 0, 100)) > 0) {
+ byteArrayOutputStream.write(buff, 0, rc);
+ }
+ return byteArrayOutputStream.toByteArray();
+ }
+
+}
diff --git a/01jvm/week1/homework/src/main/resources/Hello.xlass b/01jvm/week1/homework/src/main/resources/Hello.xlass
new file mode 100644
index 00000000..1d50c705
--- /dev/null
+++ b/01jvm/week1/homework/src/main/resources/Hello.xlass
@@ -0,0 +1 @@
+5EAÖ֩ѕ췚ߜГаГЬ곕ЖЯ앞ЖЯ׳ГЬ֩HNMIN
\ No newline at end of file
diff --git "a/01jvm/week2/GC\347\256\227\346\263\225\345\257\271\346\257\224\344\270\216\346\200\273\347\273\223.md" "b/01jvm/week2/GC\347\256\227\346\263\225\345\257\271\346\257\224\344\270\216\346\200\273\347\273\223.md"
new file mode 100644
index 00000000..a256feef
--- /dev/null
+++ "b/01jvm/week2/GC\347\256\227\346\263\225\345\257\271\346\257\224\344\270\216\346\200\273\347\273\223.md"
@@ -0,0 +1,152 @@
+## Week2 Homework
+
+### 根据上述自己对于 1 和 2 的演示,写一段对于不同 GC 和堆内存的总结,提交到 GitHub。
+
+
+
+#### 1.串行GC(Serial GC)
+
+**开启方式:**
+
+启动参数使用 -XX:+UseSerialGC
+
+**GC算法:**
+
+年轻代使用mark-copy(标记-复制)算法,老年代使用mark-sweep-compact(标记-清除-整理)算法
+
+**特点:**
+
+对年轻代和老年代的垃圾回收都会导致STW,不能充分利用多核cpu的计算资源,不管有多少个cpu,JVM在垃圾回收时只能使用单个核心。这样的话cpu占用率高,暂停时间长。
+
+**改进:**
+
+官方针对年轻代的垃圾回收做了改进,使用ParNew GC,可以对年轻代进行并行的垃圾回收,且可以配合CMS GC使用。
+
+**适用场景:**
+
+只适合几百MB堆内存的JVM,且是单核CPU的情况。
+
+
+
+#### 2.并行GC
+
+**开启方式:**
+
+Java6、7、8默认的GC算法,或添加启动参数 -XX:+UseParallelGC -XX:+UseParallelOldGC
+
+**GC算法:**
+
+年轻代使用mark-copy(标记-复制)算法,老年代使用mark-sweep-compact(标记-清除-整理)算法
+
+**特点:**
+
+对年轻代和老年代的垃圾回收仍会导致STW,但可以同时使用多个cpu内核进行垃圾回收。系统资源的利用率更高。默认使用系统核心数的线程来进行垃圾回收。设计目标是为了最大化吞吐量。且一般来说,它的吞吐量是最优的。
+
+**适用场景:**
+
+适用于多核服务器且更重视吞吐量的场景,因为对系统资源有效的利用,所以能实现更高的吞吐量。
+
+**对比:**
+
+相较于串行GC,并行GC的优势是在多核环境下会使用更多的核心线程进行垃圾回收,回收的效率更高,相应的GC暂停时间也会更短。
+
+
+
+#### 3.CMS GC
+
+**开启方式:**
+
+启动参数使用 -XX:+UseConcMarkSweepGC
+
+**GC 算法:**
+
+年轻代使用并行STW的mark-copy(标记-复制)算法——ParNew,老年代使用并发mark-sweep(标记-清除)算法
+
+**特点:**
+
+- 该收集器的设计目标是为了降低GC暂停导致的系统延迟。
+- 在老年代通过free-list管理空闲空间,而不直接整理old区内存,减少old区GC的暂停时间。
+- 另外在mark-sweep阶段,GC线程和应用线程并发执行,这样的话GC线程就可以尽量少地干扰业务线程执行的连续性。
+- 默认使用系统CPU核心数四分之一的线程进行GC。
+- 在Old区GC的过程中可能会伴随多次Minor GC
+- 初始标记和最终标记两个阶段还是会发生STW,其余时间GC和业务线程可以同时执行
+
+**适用场景:**
+
+多核心服务器,且业务系统对单次GC的暂停时间和延迟有比较高的要求,需要尽量短的GC暂停。
+
+**对比:**
+
+- 并行GC和CMS GC都是使用多线程来进行GC。
+- 并行GC会使用所有的线程来进行GC,业务线程会被暂停。CMS GC中业务线程和GC线程是可以并发执行的,即大部分时间可以同时进行
+
+**注意事项:**
+
+1. 对象晋升失败(promotion failed),老年代有足够的空间可以容纳晋升的对象,但是由于Old区GC算法没有整理的过程,空闲空间的碎片化,导致晋升失败,此时会触发单线程且伴随compact过程的Full GC。
+2. 并发模式失败,新生代发生垃圾回收,同时老年代又没有足够的空间容纳晋升的对象时,CMS 垃圾回收就会退化成单线程的Full GC。所有的应用线程都会被暂停,老年代中所有的无效对象都被回收。
+
+**解决方案:**
+
+1. 针对第一点,一个是增大堆区年轻代空间(蓄水池效应更明显)从而降低晋升速率。或者在启动参数配置UseCMSCompactAtFullCollection以及CMSFullGCsBeforeCompaction。
+2. 针对第二点,可以增加堆内存,或者GC线程数。
+
+
+
+#### 4.G1 GC
+
+**开启方式:**
+
+启动参数使用 -XX:+UseG1GC
+
+**GC 算法:**
+
+收集算法在CMS的基础上改进。G1 GC采用启发式的算法,每次都会回收当前标记为年轻代的Region,以及部分标记为老年代的Region。在并发阶段会估算每个Region内部的垃圾数量,优先回收垃圾较多的Region。每次做增量式的垃圾回收,GC效率高,灵活控制暂停时间。
+
+**特点:**
+
+- 在初始标记、最终标记、清理阶段会发生STW。
+- 目标是将STW的停顿时间和分布,变得可预期且可配置的,每次做增量式的GC。
+- 堆不再区分年轻代和老生代,而是划分成多个Region(通常是2048个),每个Region都可能被划分为Eden、Survivor、Old。
+- 每次GC只回收部分区域,GC效率高
+
+**适用场景:**
+
+较大堆内存的JVM,蓄水池作用更明显,则可以考虑使用G1 GC,一般情况下可能一次Full GC都不会发生。
+
+**对比:**
+
+- 对比串行、并行、CMS GC,打破了以往分代的模式。
+- 采用Region划分之后,G1 GC不需要每次都整理整个堆空间。
+- 采用RSet和Satb算法,垃圾对象标记效率较cms有极大的提升。
+- G1在初始标记阶段伴随一次Young GC,在最终标记阶段新增的无效引用会少很多,最终标记耗时少。
+
+**注意事项:**
+
+G1 GC在触发Full GC时,会退化成串行GC,即使用单线程回收垃圾,GC暂停时间可能达到秒级
+
+1. G1在启动标记周期时,由于Old区被填满,G1会放弃标记周期,这就是并发标记失败。
+2. 晋升失败,当没有足够的内存供存活对象和晋升对象使用,由此触发Full GC。
+3. 巨型对象分配失败,当巨型对象找不到合适的空间进行分配,就会触发Full GC
+
+**解决方案:**
+
+1. 针对上述第一点,可以增大堆内存数,增加Old区填满的时间。或者可以增加垃圾回收的线程数(-XX:ConcGCThreads)。问题可以归结为垃圾产生的速度太快了,要不减慢Old区填满,要不加快垃圾回收。
+2. 针对第二点,可以通过设置-XX:G1ReservePercent增加预留内存的百分比。或者可以通过降低IHOP提前进入标记周期。
+ 还有就是增加并发标记线程数。根本原因还是垃圾产生比回收快。
+3. 针对第三点,可以增加堆内存,或者调整Region大小(-XX:G1HeapRegionSize)。
+
+***增加堆内存向来是有效可行的办法***
+
+***吞吐量最优并不意味着GC暂停的时间是最短的。***
+
+
+
+#### 其他指标
+
+**分配速率**
+
+指单位时间内年轻代新增对象大小
+
+**晋升速率**
+
+指单位时间内堆区从年轻代晋升到年老代的对象大小
\ No newline at end of file
diff --git a/01jvm/week2/nettyserver/pom.xml b/01jvm/week2/nettyserver/pom.xml
new file mode 100644
index 00000000..211d91fc
--- /dev/null
+++ b/01jvm/week2/nettyserver/pom.xml
@@ -0,0 +1,31 @@
+
+
+ 4.0.0
+
+ com.geek
+ netty-server
+ 1.0-SNAPSHOT
+
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+
+ 6
+ 6
+
+
+
+
+
+
+
+ io.netty
+ netty-all
+ 4.1.59.Final
+
+
+
+
\ No newline at end of file
diff --git a/01jvm/week2/nettyserver/src/main/java/NettyServer.java b/01jvm/week2/nettyserver/src/main/java/NettyServer.java
new file mode 100644
index 00000000..576e9332
--- /dev/null
+++ b/01jvm/week2/nettyserver/src/main/java/NettyServer.java
@@ -0,0 +1,43 @@
+import io.netty.bootstrap.ServerBootstrap;
+import io.netty.buffer.PooledByteBufAllocator;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelOption;
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.channel.socket.nio.NioServerSocketChannel;
+import io.netty.handler.logging.LogLevel;
+import io.netty.handler.logging.LoggingHandler;
+
+public class NettyServer {
+
+ public static void main(String[] args) throws InterruptedException {
+ int port = 8081;
+ EventLoopGroup bossGroup = new NioEventLoopGroup(2);
+ EventLoopGroup workerGroup = new NioEventLoopGroup(16);
+
+ try {
+ ServerBootstrap serverBootstrap = new ServerBootstrap();
+ serverBootstrap.option(ChannelOption.SO_BACKLOG, 128)
+ .childOption(ChannelOption.TCP_NODELAY, true)
+ .childOption(ChannelOption.SO_KEEPALIVE, true)
+ .childOption(ChannelOption.SO_REUSEADDR, true)
+ .childOption(ChannelOption.SO_RCVBUF, 1024)
+ .childOption(ChannelOption.SO_SNDBUF, 1024)
+ .childOption(ChannelOption.SO_REUSEADDR, true)
+ .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
+
+ serverBootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
+ .handler(new LoggingHandler(LogLevel.INFO))
+ .childHandler(new TestInitializer());
+
+ Channel channel = serverBootstrap.bind(port).sync().channel();
+ System.out.println("Succeed start netty server,listen to http://localhost:" + port);
+ channel.closeFuture().sync();
+ } finally {
+ bossGroup.shutdownGracefully();
+ workerGroup.shutdownGracefully();
+ }
+
+
+ }
+}
\ No newline at end of file
diff --git a/01jvm/week2/nettyserver/src/main/java/RequestHandler.java b/01jvm/week2/nettyserver/src/main/java/RequestHandler.java
new file mode 100644
index 00000000..68a78049
--- /dev/null
+++ b/01jvm/week2/nettyserver/src/main/java/RequestHandler.java
@@ -0,0 +1,45 @@
+import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelFutureListener;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelInboundHandlerAdapter;
+import io.netty.handler.codec.http.*;
+import io.netty.util.ReferenceCountUtil;
+
+import java.nio.charset.Charset;
+
+public class RequestHandler extends ChannelInboundHandlerAdapter {
+
+ @Override
+ public void channelReadComplete(ChannelHandlerContext ctx) {
+ ctx.flush();
+ }
+
+ @Override
+ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
+ try {
+ FullHttpRequest fullHttpRequest = (FullHttpRequest) msg;
+ FullHttpResponse response = null;
+ try {
+ String responseMsg = "Request handled by RequestHandler.";
+ response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK,
+ Unpooled.wrappedBuffer(responseMsg.getBytes(Charset.forName("utf-8"))));
+ response.headers().set("Content-Type", "application/json");
+ response.headers().setInt("Content-Length", response.content().readableBytes());
+ } catch (Exception e) {
+ System.out.println("Proceed with error: " + e.getMessage());
+ response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.NO_CONTENT);
+ } finally {
+ if (!HttpUtil.isKeepAlive(fullHttpRequest)) {
+ ctx.write(response).addListener(ChannelFutureListener.CLOSE);
+ } else {
+ response.headers().set("Connection", "Keep-Alive");
+ ctx.write(response);
+ }
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ ReferenceCountUtil.release(msg);
+ }
+ }
+}
\ No newline at end of file
diff --git a/01jvm/week2/nettyserver/src/main/java/TestInitializer.java b/01jvm/week2/nettyserver/src/main/java/TestInitializer.java
new file mode 100644
index 00000000..d4a5107d
--- /dev/null
+++ b/01jvm/week2/nettyserver/src/main/java/TestInitializer.java
@@ -0,0 +1,16 @@
+import io.netty.channel.ChannelInitializer;
+import io.netty.channel.ChannelPipeline;
+import io.netty.channel.socket.SocketChannel;
+import io.netty.handler.codec.http.HttpObjectAggregator;
+import io.netty.handler.codec.http.HttpServerCodec;
+
+public class TestInitializer extends ChannelInitializer {
+
+ @Override
+ protected void initChannel(SocketChannel ch) throws Exception {
+ ChannelPipeline pipeline = ch.pipeline();
+ pipeline.addLast(new HttpServerCodec())
+ .addLast(new HttpObjectAggregator(1024 * 1024))
+ .addLast(new RequestHandler());
+ }
+}
\ No newline at end of file
diff --git a/01jvm/week2/requestclient/pom.xml b/01jvm/week2/requestclient/pom.xml
new file mode 100644
index 00000000..f47e8b4d
--- /dev/null
+++ b/01jvm/week2/requestclient/pom.xml
@@ -0,0 +1,19 @@
+
+
+ 4.0.0
+
+ com.geek
+ request-client
+ 1.0-SNAPSHOT
+
+
+ org.apache.httpcomponents
+ httpclient
+ 4.5.12
+
+
+
+
+
\ No newline at end of file
diff --git a/01jvm/week2/requestclient/src/main/java/RequestClient.java b/01jvm/week2/requestclient/src/main/java/RequestClient.java
new file mode 100644
index 00000000..d51d78d6
--- /dev/null
+++ b/01jvm/week2/requestclient/src/main/java/RequestClient.java
@@ -0,0 +1,37 @@
+import org.apache.http.HttpEntity;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.util.EntityUtils;
+
+import java.io.IOException;
+
+public class RequestClient {
+
+ public static void main(String[] args) {
+ CloseableHttpClient client = HttpClientBuilder.create().build();
+ HttpGet httpGet = new HttpGet("http://localhost:8081");
+ CloseableHttpResponse response = null;
+
+ try {
+ response = client.execute(httpGet);
+ HttpEntity httpEntity = response.getEntity();
+ String responseContent = EntityUtils.toString(httpEntity, "utf-8");
+ System.out.println("Server response content:" + responseContent);
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ try {
+ if (client != null) {
+ client.close();
+ }
+ if (response != null) {
+ response.close();
+ }
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/03concurrency/week3/mustdo1/HttpHandler.java b/03concurrency/week3/mustdo1/HttpHandler.java
new file mode 100644
index 00000000..cc6bdb7d
--- /dev/null
+++ b/03concurrency/week3/mustdo1/HttpHandler.java
@@ -0,0 +1,76 @@
+package java0.nio01.netty;
+
+import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelFutureListener;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelInboundHandlerAdapter;
+import io.netty.handler.codec.http.DefaultFullHttpResponse;
+import io.netty.handler.codec.http.FullHttpRequest;
+import io.netty.handler.codec.http.FullHttpResponse;
+import io.netty.handler.codec.http.HttpUtil;
+import io.netty.util.ReferenceCountUtil;
+
+import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION;
+import static io.netty.handler.codec.http.HttpHeaderValues.KEEP_ALIVE;
+import static io.netty.handler.codec.http.HttpResponseStatus.NO_CONTENT;
+import static io.netty.handler.codec.http.HttpResponseStatus.OK;
+import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
+
+public class HttpHandler extends ChannelInboundHandlerAdapter {
+
+ @Override
+ public void channelReadComplete(ChannelHandlerContext ctx) {
+ ctx.flush();
+ }
+
+ @Override
+ public void channelRead(ChannelHandlerContext ctx, Object msg) {
+ try {
+ //logger.info("channelRead流量接口请求开始,时间为{}", startTime);
+ FullHttpRequest fullRequest = (FullHttpRequest) msg;
+ String uri = fullRequest.uri();
+ //logger.info("接收到的请求url为{}", uri);
+ if (uri.contains("/test")) {
+ handlerTest(fullRequest, ctx);
+ }
+
+ } catch(Exception e) {
+ e.printStackTrace();
+ } finally {
+ ReferenceCountUtil.release(msg);
+ }
+ }
+
+ private void handlerTest(FullHttpRequest fullRequest, ChannelHandlerContext ctx) {
+ FullHttpResponse response = null;
+ try {
+ String value = RequestClient.getResponseByHttpClient();
+
+ response = new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(value.getBytes("UTF-8")));
+ response.headers().set("Content-Type", "application/json");
+ response.headers().setInt("Content-Length", response.content().readableBytes());
+
+ } catch (Exception e) {
+ System.out.println("处理出错:"+e.getMessage());
+ response = new DefaultFullHttpResponse(HTTP_1_1, NO_CONTENT);
+ } finally {
+ if (fullRequest != null) {
+ if (!HttpUtil.isKeepAlive(fullRequest)) {
+ ctx.write(response).addListener(ChannelFutureListener.CLOSE);
+ } else {
+ response.headers().set(CONNECTION, KEEP_ALIVE);
+ ctx.write(response);
+ }
+ }
+ }
+ }
+
+ @Override
+ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
+ cause.printStackTrace();
+ ctx.close();
+ }
+
+
+
+}
diff --git a/03concurrency/week3/mustdo1/HttpInitializer.java b/03concurrency/week3/mustdo1/HttpInitializer.java
new file mode 100644
index 00000000..bf1279fb
--- /dev/null
+++ b/03concurrency/week3/mustdo1/HttpInitializer.java
@@ -0,0 +1,19 @@
+package java0.nio01.netty;
+
+import io.netty.channel.ChannelInitializer;
+import io.netty.channel.ChannelPipeline;
+import io.netty.channel.socket.SocketChannel;
+import io.netty.handler.codec.http.HttpObjectAggregator;
+import io.netty.handler.codec.http.HttpServerCodec;
+
+public class HttpInitializer extends ChannelInitializer {
+
+ @Override
+ public void initChannel(SocketChannel ch) {
+ ChannelPipeline p = ch.pipeline();
+ p.addLast(new HttpServerCodec());
+ //p.addLast(new HttpServerExpectContinueHandler());
+ p.addLast(new HttpObjectAggregator(1024 * 1024));
+ p.addLast(new HttpHandler());
+ }
+}
diff --git a/03concurrency/week3/mustdo1/NettyHttpServer.java b/03concurrency/week3/mustdo1/NettyHttpServer.java
new file mode 100644
index 00000000..3482483e
--- /dev/null
+++ b/03concurrency/week3/mustdo1/NettyHttpServer.java
@@ -0,0 +1,48 @@
+package java0.nio01.netty;
+
+import io.netty.bootstrap.ServerBootstrap;
+import io.netty.buffer.PooledByteBufAllocator;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelOption;
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.epoll.EpollChannelOption;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.channel.socket.nio.NioServerSocketChannel;
+import io.netty.handler.logging.LogLevel;
+import io.netty.handler.logging.LoggingHandler;
+
+public class NettyHttpServer {
+ public static void main(String[] args) throws InterruptedException {
+
+ int port = 8808;
+
+ EventLoopGroup bossGroup = new NioEventLoopGroup(2);
+ EventLoopGroup workerGroup = new NioEventLoopGroup(16);
+
+ try {
+ ServerBootstrap b = new ServerBootstrap();
+ b.option(ChannelOption.SO_BACKLOG, 128)
+ .childOption(ChannelOption.TCP_NODELAY, true)
+ .childOption(ChannelOption.SO_KEEPALIVE, true)
+ .childOption(ChannelOption.SO_REUSEADDR, true)
+ .childOption(ChannelOption.SO_RCVBUF, 32 * 1024)
+ .childOption(ChannelOption.SO_SNDBUF, 32 * 1024)
+ .childOption(EpollChannelOption.SO_REUSEPORT, true)
+ .childOption(ChannelOption.SO_KEEPALIVE, true)
+ .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
+
+ b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
+ .handler(new LoggingHandler(LogLevel.INFO))
+ .childHandler(new HttpInitializer());
+
+ Channel ch = b.bind(port).sync().channel();
+ System.out.println("开启netty http服务器,监听地址和端口为 http://127.0.0.1:" + port + '/');
+ ch.closeFuture().sync();
+ } finally {
+ bossGroup.shutdownGracefully();
+ workerGroup.shutdownGracefully();
+ }
+
+
+ }
+}
diff --git a/03concurrency/week3/mustdo1/RequestClient.java b/03concurrency/week3/mustdo1/RequestClient.java
new file mode 100644
index 00000000..d2830398
--- /dev/null
+++ b/03concurrency/week3/mustdo1/RequestClient.java
@@ -0,0 +1,39 @@
+package java0.nio01.netty;
+
+import org.apache.http.HttpEntity;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.util.EntityUtils;
+
+import java.io.IOException;
+
+public class RequestClient {
+
+ public static String getResponseByHttpClient() {
+ CloseableHttpClient client = HttpClientBuilder.create().build();
+ HttpGet httpGet = new HttpGet("http://localhost:8081");
+ CloseableHttpResponse response = null;
+ String responseContent = null;
+ try {
+ response = client.execute(httpGet);
+ HttpEntity httpEntity = response.getEntity();
+ responseContent = EntityUtils.toString(httpEntity, "utf-8");
+ } catch (Exception e) {
+ e.printStackTrace();
+ } finally {
+ try {
+ if (client != null) {
+ client.close();
+ }
+ if (response != null) {
+ response.close();
+ }
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ return responseContent;
+ }
+}
\ No newline at end of file
diff --git a/03concurrency/week3/mustdo2/CustomizeHttpRequestFilter.java b/03concurrency/week3/mustdo2/CustomizeHttpRequestFilter.java
new file mode 100644
index 00000000..a4a3563c
--- /dev/null
+++ b/03concurrency/week3/mustdo2/CustomizeHttpRequestFilter.java
@@ -0,0 +1,11 @@
+package io.github.kimmking.gateway.filter;
+
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.handler.codec.http.FullHttpRequest;
+
+public class CustomizeHttpRequestFilter implements HttpRequestFilter {
+ @Override
+ public void filter(FullHttpRequest fullRequest, ChannelHandlerContext ctx) {
+ fullRequest.headers().set("X-NETTY-DEMO", "Eddy Guo testing");
+ }
+}
diff --git a/03concurrency/week3/mustdo2/HttpInboundHandler.java b/03concurrency/week3/mustdo2/HttpInboundHandler.java
new file mode 100644
index 00000000..e4bee290
--- /dev/null
+++ b/03concurrency/week3/mustdo2/HttpInboundHandler.java
@@ -0,0 +1,81 @@
+package io.github.kimmking.gateway.inbound;
+
+import io.github.kimmking.gateway.filter.CustomizeHttpRequestFilter;
+import io.github.kimmking.gateway.filter.HttpRequestFilter;
+import io.github.kimmking.gateway.outbound.httpclient4.HttpOutboundHandler;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelInboundHandlerAdapter;
+import io.netty.handler.codec.http.FullHttpRequest;
+import io.netty.util.ReferenceCountUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+
+public class HttpInboundHandler extends ChannelInboundHandlerAdapter {
+
+ private static Logger logger = LoggerFactory.getLogger(HttpInboundHandler.class);
+ private final List proxyServer;
+ private HttpOutboundHandler handler;
+ private HttpRequestFilter filter = new CustomizeHttpRequestFilter();
+
+ public HttpInboundHandler(List proxyServer) {
+ this.proxyServer = proxyServer;
+ this.handler = new HttpOutboundHandler(this.proxyServer);
+ }
+
+ @Override
+ public void channelReadComplete(ChannelHandlerContext ctx) {
+ ctx.flush();
+ }
+
+ @Override
+ public void channelRead(ChannelHandlerContext ctx, Object msg) {
+ try {
+ //logger.info("channelRead流量接口请求开始,时间为{}", startTime);
+ FullHttpRequest fullRequest = (FullHttpRequest) msg;
+// String uri = fullRequest.uri();
+// //logger.info("接收到的请求url为{}", uri);
+// if (uri.contains("/test")) {
+// handlerTest(fullRequest, ctx);
+// }
+
+ handler.handle(fullRequest, ctx, filter);
+
+ } catch(Exception e) {
+ e.printStackTrace();
+ } finally {
+ ReferenceCountUtil.release(msg);
+ }
+ }
+
+// private void handlerTest(FullHttpRequest fullRequest, ChannelHandlerContext ctx) {
+// FullHttpResponse response = null;
+// try {
+// String value = "hello,kimmking";
+// response = new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(value.getBytes("UTF-8")));
+// response.headers().set("Content-Type", "application/json");
+// response.headers().setInt("Content-Length", response.content().readableBytes());
+//
+// } catch (Exception e) {
+// logger.error("处理测试接口出错", e);
+// response = new DefaultFullHttpResponse(HTTP_1_1, NO_CONTENT);
+// } finally {
+// if (fullRequest != null) {
+// if (!HttpUtil.isKeepAlive(fullRequest)) {
+// ctx.write(response).addListener(ChannelFutureListener.CLOSE);
+// } else {
+// response.headers().set(CONNECTION, KEEP_ALIVE);
+// ctx.write(response);
+// }
+// }
+// }
+// }
+//
+// @Override
+// public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
+// cause.printStackTrace();
+// ctx.close();
+// }
+
+}
diff --git a/03concurrency/week4/JUC.PNG b/03concurrency/week4/JUC.PNG
new file mode 100644
index 00000000..99f3b774
Binary files /dev/null and b/03concurrency/week4/JUC.PNG differ
diff --git "a/03concurrency/week4/Java\345\244\232\347\272\277\347\250\213.PNG" "b/03concurrency/week4/Java\345\244\232\347\272\277\347\250\213.PNG"
new file mode 100644
index 00000000..fd121a25
Binary files /dev/null and "b/03concurrency/week4/Java\345\244\232\347\272\277\347\250\213.PNG" differ
diff --git a/03concurrency/week4/README.md b/03concurrency/week4/README.md
new file mode 100644
index 00000000..cdda66dd
--- /dev/null
+++ b/03concurrency/week4/README.md
@@ -0,0 +1,95 @@
+### week - 04
+
+***
+
+**2**.(必做)思考有多少种方式,在 main 函数启动一个新线程,运行一个方法,拿到这个方法的返回值后,退出主线程? 写出所有方法。
+
+1. Runnable + getter
+
+ ```java
+ /**
+ * 这种方式下,Runnable的run方法并不会直接返回结果,
+ * 但是可以通过在实现类中设置变量,通过get方法获取变量的方法模拟获取线程的返回值
+ */
+ public class ByRunnable {
+ public static void main(String[] args) throws Exception {
+ TestRunnable testRunnable = new TestRunnable();
+ Thread thread = new Thread(testRunnable);
+ thread.start();
+ thread.join(); //wait for sub thread complete
+ System.out.println("result:"+testRunnable.getResult());
+ }
+
+ static final class TestRunnable implements Runnable{
+ private String result = "";
+ public void run() {
+ try {
+ System.out.println(Thread.currentThread().getName()+": start");
+ Thread.sleep(2000);
+ System.out.println(Thread.currentThread().getName()+": end");
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ result = "mock a result";
+ }
+ private String getResult() {
+ return result;
+ }
+ }
+ }
+ ```
+
+2. Future + Callable
+
+ ```java
+ /**
+ * Future和Callable配合使用,Callable的call方法本身是有返回值了
+ * 通过Future接收并封装子线程返回的结果,就可以在主线程获取子线程的返回值
+ */
+ public class ByFutureTaskAndCallable {
+ public static void main(String[] args) throws Exception {
+ TestCallable testCallable = new TestCallable();
+ FutureTask futureTask = new FutureTask(testCallable);
+ Thread thread = new Thread(futureTask);
+ thread.start();
+ System.out.println("result:"+futureTask.get());
+ }
+
+ static final class TestCallable implements Callable {
+ public String call() throws Exception {
+ System.out.println(Thread.currentThread().getName()+":执行 start");
+ Thread.sleep(2000);
+ System.out.println(Thread.currentThread().getName()+":执行 end");
+ return "Hello world";
+ }
+
+ }
+ }
+ ```
+
+3. 直接通过CompletableFuture
+
+ ```java
+ /**
+ * 以下是我在实际项目中通过CompletableFuture实现并发处理多数据源数据并在主线程中聚合结果的例子
+ * 通过CompletableFuture分别起两个子线程,主线程通过allOf等待两个子线程完成,通过thenAccept把future拿到的结果作为后续操作的入参
+ * 通过CopyOnWriteArrayList保证并发写入的有效性。
+ */
+ private List getAllMatchedTransactions(LocalDateTime startTime, LocalDateTime endTime) {
+ List monthlyReconciliationReportItems = new CopyOnWriteArrayList<>();
+ CompletableFuture> payCargoMonthlyReconciliationItems =
+ CompletableFuture.supplyAsync(() ->
+ reconciliationDao.findPayCargoPaymentReconciliationItem(startTime, endTime, PAY_CARGO.getValue()))
+ .thenApply(this::convertPayCaroReportItem2MonthlyReconciliationReportItemList);
+ CompletableFuture> cpcnMonthlyReconciliationItems =
+ CompletableFuture.supplyAsync(() ->
+ reconciliationDao.findCpcnPaymentReconciliationItem(startTime, endTime, "", ""))
+ .thenApply(this::convertCPCNReportItem2MonthlyReconciliationReportItemList);
+ CompletableFuture.allOf(
+ payCargoMonthlyReconciliationItems.thenAccept(monthlyReconciliationReportItems::addAll),
+ cpcnMonthlyReconciliationItems.thenAccept(monthlyReconciliationReportItems::addAll)
+ ).join();
+ return monthlyReconciliationReportItems;
+ }
+ ```
+
diff --git "a/03concurrency/week4/\345\244\232\347\272\277\347\250\213\345\217\212\345\271\266\345\217\221.xmind" "b/03concurrency/week4/\345\244\232\347\272\277\347\250\213\345\217\212\345\271\266\345\217\221.xmind"
new file mode 100644
index 00000000..70ea94f3
Binary files /dev/null and "b/03concurrency/week4/\345\244\232\347\272\277\347\250\213\345\217\212\345\271\266\345\217\221.xmind" differ
diff --git "a/03concurrency/week4/\347\272\277\347\250\213\345\256\211\345\205\250.PNG" "b/03concurrency/week4/\347\272\277\347\250\213\345\256\211\345\205\250.PNG"
new file mode 100644
index 00000000..7e51aa83
Binary files /dev/null and "b/03concurrency/week4/\347\272\277\347\250\213\345\256\211\345\205\250.PNG" differ
diff --git "a/03concurrency/week4/\347\272\277\347\250\213\346\261\240.PNG" "b/03concurrency/week4/\347\272\277\347\250\213\346\261\240.PNG"
new file mode 100644
index 00000000..a1753db7
Binary files /dev/null and "b/03concurrency/week4/\347\272\277\347\250\213\346\261\240.PNG" differ
diff --git a/04fx/week05/week05 homework.md b/04fx/week05/week05 homework.md
new file mode 100644
index 00000000..4fd2a921
--- /dev/null
+++ b/04fx/week05/week05 homework.md
@@ -0,0 +1,785 @@
+**必做题2**:写代码实现 Spring Bean 的装配,方式越多越好(XML、Annotation 都可以)。
+
+> Bean的装配是指在Spring的IOC容器中,通常情况下IOC都是指依赖注入DI。当然依赖查找也是IOC的实现方式之一。
+
+### 1 XML配置
+
+### 1.1 Setter注入
+
+定义SetterBean这个类
+
+```java
+/*
+ 仅需要添加getter/setter方法,不需要全参构造方法
+ */
+public class SetterBean {
+ private String name;
+ private String desc;
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public void setDesc(String desc) {
+ this.desc = desc;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public String getDesc() {
+ return desc;
+ }
+
+ public void printInfo() {
+ System.out.println("name: " + this.name );
+ System.out.println("desc: " + this.desc);
+ System.out.println("timestamp: " + LocalDateTime.now());
+ }
+}
+```
+
+配置applicationContext.xml,将SetterBean注册成bean,并且通过set值的方法将属性注入到bean
+
+```xml
+
+
+
+
+
+```
+
+测试类和结果
+
+```java
+public class SetterInjectionTest {
+
+ public static void main(String[] args) {
+ ApplicationContext applicationContext =
+ new ClassPathXmlApplicationContext("applicationContext.xml");
+
+ SetterBean setterBean = (SetterBean) applicationContext.getBean("setterBean");
+
+ setterBean.printInfo();
+ }
+}
+
+结果:
+name: setter injection
+desc: setter inject test
+timestamp: 2021-04-18T23:06:51.297
+```
+
+### 1.2 构造器注入
+
+定义ConstructorBean这个类
+
+```java
+/*
+ 需要添加全参构造方法,不需要setter方法
+ */
+public class ConstructorBean {
+
+ private String name;
+ private String desc;
+
+ public ConstructorBean(String name, String desc) {
+ this.name = name;
+ this.desc = desc;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public String getDesc() {
+ return desc;
+ }
+
+ public void printInfo() {
+ System.out.println("name: " + this.name );
+ System.out.println("desc: " + this.desc);
+ System.out.println("timestamp: " + LocalDateTime.now());
+ }
+}
+```
+
+配置applicationContext.xml,将ConstructorBean注册成bean,并且通过构造函数将属性注入到bean
+
+```xml
+
+
+
+
+
+```
+
+测试类和结果
+
+```java
+public class ConsrtuctorInjectionTest {
+
+ public static void main(String[] args) {
+ ApplicationContext applicationContext =
+ new ClassPathXmlApplicationContext("applicationContext.xml");
+
+ ConstructorBean constructorBean = (ConstructorBean) applicationContext.getBean("constructorBean");
+
+ constructorBean.printInfo();
+ }
+}
+
+结果:
+name: constructor injection
+desc: constructor inject test
+timestamp: 2021-04-18T23:17:10.333
+```
+
+### 2 注解注入(Annotation)
+
+定义AnnotationBean类,通过@Component的方式将其注册成Bean
+
+```java
+/*
+ 通过@Component将AnnotationBean类注册成Bean
+ 通过PostConstruct在Bean构造阶段进行属性赋值
+ */
+@Component("annotationBean")
+public class AnnotationBean {
+
+ private String name;
+ private String desc;
+
+ @PostConstruct
+ private void setup() {
+ this.name = "annotation injection";
+ this.desc = "annotation inject test";
+ }
+
+ public void printInfo() {
+ System.out.println("name: " + this.name );
+ System.out.println("desc: " + this.desc);
+ System.out.println("timestamp: " + LocalDateTime.now());
+ }
+}
+```
+
+配置applicationContext.xml,配置component扫描路径,将对应路径下的通过注解(@Component、@Controller、@Service、@Repository等)注册成Bean的类。
+
+```xml
+
+```
+
+测试类和结果
+
+```java
+public class AnnotationInjectionTest {
+
+ public static void main(String[] args) {
+ ApplicationContext applicationContext =
+ new ClassPathXmlApplicationContext("applicationContext.xml");
+
+ AnnotationBean annotationBean = (AnnotationBean) applicationContext.getBean("annotationBean");
+
+ annotationBean.printInfo();
+ }
+}
+
+结果:
+name: annotation injection
+desc: annotation inject test
+timestamp: 2021-04-18T23:25:10.938
+```
+
+
+
+***
+
+
+
+**必做题8**:给前面课程提供的 Student/Klass/School 实现自动配置和 Starter。
+
+### 1自定义Starter
+
+#### 1.1 定义Student类
+
+定义Student类,我们会在starter配置类中通过方法返回值的方式把Student注册成Bean
+
+```java
+/*
+ 定义Student类,我们会在starter配置类中通过方法返回值的方式把Student注册成Bean
+ */
+public class Student {
+
+ private int id;
+ private String name;
+
+ public Student(int id, String name) {
+ this.id = id;
+ this.name = name;
+ }
+
+ public void printInfo() {
+ System.out.println("id: " + this.id);
+ System.out.println("name: " + this.name);
+ System.out.println("timestamp: " + LocalDateTime.now());
+ }
+}
+```
+
+#### 1.2 定义StudentProperties类
+
+> 该类可以用于定义该starter的可配置属性。通过@Component将该类注册为Bean,使得Spring容器能管理该类并在初始化过程中注入值。并且通过@ConfigurationProperties读取application.properties/application.yml配置的属性值并注入到该类。
+
+```java
+/*
+ 定义StudentProperties类用于定义该starter的可配置属性
+ 通过@Component将该类注册为Bean,使得Spring容器能管理该类并在初始化过程中注入值
+ 并且通过@ConfigurationProperties读取application.properties/application.yml配置的属性值并注入到该类
+ */
+@Component
+@Getter
+@Setter
+@ConfigurationProperties(prefix = "student")
+public class StudentProperties implements Serializable {
+
+ private int id;
+ private String name;
+
+}
+```
+
+#### 1.3 定义StudentConfig类
+
+> 定义StudentConfig类,并且通过@Configuration装配该类。该类也是该starter通过spring.factories拉起的核心类。通过@EnableConfigurationProperties和@Autowired将读取到的属性注入。Student类则作为该starter的核心功能类,类比DataSources,通过@Bean的方式注入到Spring容器。
+
+```java
+/*
+ 定义StudentConfig类,并且通过@Configuration装配该类。该类也是该starter通过spring.factories拉起的核心类。
+ 通过@EnableConfigurationProperties和@Autowired将读取到的属性注入。
+ Student类则作为该starter的核心功能类,类比DataSources,通过@Bean的方式注入到Spring容器
+ 当配置文件里面student前缀下name属性为stu001时,才拉起该配置类,即才起用该starter
+ */
+@Configuration
+@EnableConfigurationProperties(StudentProperties.class)
+@ConditionalOnClass(Student.class)
+@ConditionalOnProperty(prefix = "student", name = "name", havingValue = "stu001")
+public class StudentConfig {
+
+ @Autowired
+ private StudentProperties studentProperties;
+
+ @Bean
+ @ConditionalOnMissingBean(Student.class)
+ public Student student() {
+ return new Student(studentProperties.getId(), studentProperties.getName());
+ }
+}
+```
+
+#### 1.4 元信息配置
+
+定义spring.factories指定starter的入口配置类
+
+```properties
+org.springframework.boot.autoconfigure.EnableAutoConfiguration=\
+ com.geek.eddy.starterdemo.StudentConfig
+```
+
+定义spring.provides指定starter的名称
+
+```yml
+provides: student-demo-starter
+```
+
+#### 1.5 将starter install到本地的maven库
+
+```
+mvn clean install
+```
+
+
+
+### 2 引用自定义starter并测试
+
+#### 2.1 新建项目并引入自定义starter的maven依赖
+
+```xml
+
+
+ com.geek.eddy
+ student-demo-starter
+ 0.0.1-SNAPSHOT
+
+```
+
+#### 2.2 配置文件中配置starter所需的属性(application.properties/application.yml都可以)
+
+```properties
+student.id=123
+student.name=stu001
+```
+
+#### 2.3 测试类及执行结果
+
+> 通过@RestController创建接入层Bean以便暴露接口进行测试。通过@Autowired注入starter中装配的核心功能Bean Student
+
+```java
+/*
+ 新启动springboot项目,通过@RestController创建接入层Bean以便暴露接口进行测试
+ 通过@Autowired注入starter中装配的核心功能Bean Student
+ */
+@RestController
+public class StarterTestController {
+
+ @Autowired
+ private Student student;
+
+ @GetMapping("/print-student-info")
+ public void printInfo() {
+ student.printInfo();
+ }
+}
+
+结果:
+id: 123
+name: stu001
+timestamp: 2021-04-18 01:50:47.025
+```
+
+
+
+***
+
+
+
+**必做题10**:研究一下 JDBC 接口和数据库连接池,掌握它们的设计和用法:
+
+### 1 使用 JDBC 原生接口,实现数据库的增删改查操作。
+
+#### 1.1 Repository层,定义JdbcNativeRepository数据库操作类
+
+```java
+/*
+ 使用原生的jdbc接口进行数据库操作,每次操作前获取一个连接,在新建statement执行sql
+ 操作后主动关闭statement以及connection
+ */
+@Repository
+public class JdbcNativeRepository {
+
+ private static Connection connection;
+ private static Statement statement;
+
+ //每次获取新的connection再创建新的statement,然后执行insert、update、delete操作
+ public int updateOperation(String sql) throws SQLException, ClassNotFoundException {
+ try {
+ connection = JdbcUtil.getNewConnection();
+ statement = connection.createStatement();
+ return statement.executeUpdate(sql);
+ } catch (SQLException | ClassNotFoundException e) {
+ e.printStackTrace();
+ throw e;
+ } finally {
+ JdbcUtil.closeResources(connection, statement);
+ }
+ }
+
+ //每次获取新的connection再创建新的statement,然后执行query操作
+ public ResultSet queryOperation(String sql) throws SQLException, ClassNotFoundException {
+ try {
+ connection = JdbcUtil.getNewConnection();
+ statement = connection.createStatement();
+ return statement.executeQuery(sql);
+ } catch (SQLException | ClassNotFoundException e) {
+ e.printStackTrace();
+ throw e;
+ } finally {
+ JdbcUtil.closeResources(connection, statement);
+ }
+ }
+}
+```
+
+#### 1.2 Service层,增删查改操作各100次
+
+```java
+@Service
+public class JdbcNativeService {
+
+ @Autowired
+ private JdbcNativeRepository jdbcNativeRepository;
+
+ public void insert100Times() throws SQLException, ClassNotFoundException {
+ for (int i = 0; i < 100 ; i++) {
+ String id = String.valueOf(i);
+ String name = "user" + i;
+ String insertSql = "insert into user(id, name) values('%s', '%s')";
+ jdbcNativeRepository.updateOperation(String.format(insertSql, id, name));
+ }
+ }
+
+ public void update100Times() throws SQLException, ClassNotFoundException {
+ for (int i = 0; i < 100 ; i++) {
+ String id = String.valueOf(i);
+ String updateSql = "update user set name = 'testName' where id = '%s'";
+ jdbcNativeRepository.updateOperation(String.format(updateSql, id));
+ }
+ }
+
+ public void delete100Times() throws SQLException, ClassNotFoundException {
+ for (int i = 0; i < 100 ; i++) {
+ String id = String.valueOf(i);
+ String deleteSql = "delete from user where id = '%s'";
+ jdbcNativeRepository.updateOperation(String.format(deleteSql, id));
+ }
+ }
+
+ public void query100Times() throws SQLException, ClassNotFoundException {
+ for (int i = 0; i < 100 ; i++) {
+ String id = String.valueOf(i);
+ String updateSql = "select * from user where id = '%s'";
+ jdbcNativeRepository.queryOperation(String.format(updateSql, id));
+ }
+ }
+}
+```
+
+#### 1.3 Controller层,测试接口及结果
+
+```java
+@RestController
+public class JdbcTestController {
+
+ @Autowired
+ private JdbcNativeService jdbcNativeService;
+
+ @GetMapping("test-jdbc-native-update-method")
+ public void testJdbcNativeUpdateMethod() throws SQLException, ClassNotFoundException {
+ System.out.println("测试原生jdbc更新接口:100次insert、100次update、100次delete");
+ long starTime = System.currentTimeMillis();
+
+ jdbcNativeService.insert100Times();
+ System.out.println("成功insert100次!");
+
+ jdbcNativeService.update100Times();
+ System.out.println("成功update100次!");
+
+ jdbcNativeService.delete100Times();
+ System.out.println("成功delete100次!");
+
+ long endTime = System.currentTimeMillis();
+ System.out.println("duration:"+(endTime - starTime));
+ }
+
+ @GetMapping("test-jdbc-native-query-method")
+ public void testJdbcNativeQueryMethod() throws SQLException, ClassNotFoundException {
+ System.out.println("测试原生jdbc查询接口:100次query");
+ long starTime = System.currentTimeMillis();
+
+ jdbcNativeService.query100Times();
+ System.out.println("成功query100次!");
+
+ long endTime = System.currentTimeMillis();
+ System.out.println("duration:"+(endTime - starTime));
+ }
+}
+
+结果:
+测试原生jdbc更新接口:100次insert、100次update、100次删除
+成功insert100次
+成功update100次
+成功delete100次
+duration:25876ms
+
+测试原生jdbc查询接口:100次query
+成功query100次!
+duration:2676ms
+```
+
+### 2 使用事务,PrepareStatement 方式,批处理方式,改进上述操作。
+
+#### 2.1 Repository层,定义JdbcEnhanceRepository数据库操作类
+
+```java
+/*
+ 优化:
+ 1. 采用编程式事务(setAutoCommit(false) + 手动commit),所有方法手动提交事务
+ 2. 通过Connection的prepareStatement()方法对sql进行预处理,防止注入攻击
+ 3. 通过PrepareStatement的addBatch()方法,积累sql并批量执行。无需每次操作都重复地建立多次连接
+*/
+@Repository
+public class JdbcEnhanceRepository {
+
+ private static Connection connection;
+ private static PreparedStatement preparedStatement;
+
+ public void batchInsertOperation() throws SQLException, ClassNotFoundException {
+ try {
+ connection = JdbcUtil.getNewConnection();
+ connection.setAutoCommit(false);
+ String insertSql = "insert into user(id, name) values(?, ?)";
+ preparedStatement = connection.prepareStatement(insertSql);
+ for (int i = 0; i < 100; i++) {
+ preparedStatement.setString(1, String.valueOf(i));
+ preparedStatement.setString(2, "stu" + i);
+ preparedStatement.addBatch();
+ }
+ preparedStatement.executeBatch();
+ connection.commit();
+ } catch (SQLException | ClassNotFoundException e) {
+ e.printStackTrace();
+ throw e;
+ } finally {
+ JdbcUtil.closeResources(connection, preparedStatement);
+ }
+ }
+
+ public void batchUpdateOperation() throws SQLException, ClassNotFoundException {
+ try {
+ connection = JdbcUtil.getNewConnection();
+ connection.setAutoCommit(false);
+ String updateSql = "update user set name = 'testName' where id = ?";
+ preparedStatement = connection.prepareStatement(updateSql);
+ for (int i = 0; i < 100; i++) {
+ preparedStatement.setString(1, String.valueOf(i));
+ preparedStatement.addBatch();
+ }
+ preparedStatement.executeBatch();
+ connection.commit();
+ } catch (SQLException | ClassNotFoundException e) {
+ e.printStackTrace();
+ throw e;
+ } finally {
+ JdbcUtil.closeResources(connection, preparedStatement);
+ }
+ }
+
+ public void batchDeleteOperation() throws SQLException, ClassNotFoundException {
+ try {
+ connection = JdbcUtil.getNewConnection();
+ connection.setAutoCommit(false);
+ String deleteSql = "delete from user where id = ?";
+ preparedStatement = connection.prepareStatement(deleteSql);
+ for (int i = 0; i < 100; i++) {
+ preparedStatement.setString(1, String.valueOf(i));
+ preparedStatement.addBatch();
+ }
+ preparedStatement.executeBatch();
+ connection.commit();
+ } catch (SQLException | ClassNotFoundException e) {
+ e.printStackTrace();
+ throw e;
+ } finally {
+ JdbcUtil.closeResources(connection, preparedStatement);
+ }
+ }
+
+ public ResultSet batchQueryOperation(String sql) throws SQLException, ClassNotFoundException {
+ try {
+ connection = JdbcUtil.getNewConnection();
+ preparedStatement = connection.prepareStatement(sql);
+ for (int i = 0; i < 100; i++) {
+ preparedStatement.setString(1, String.valueOf(i));
+ preparedStatement.addBatch();
+ }
+ return preparedStatement.executeQuery();
+ } catch (SQLException | ClassNotFoundException e) {
+ e.printStackTrace();
+ throw e;
+ } finally {
+ JdbcUtil.closeResources(connection, preparedStatement);
+ }
+ }
+}
+```
+
+#### 2.2 Service层,增删查改操作各100次
+
+```java
+@Service
+public class JdbcEnhanceService {
+
+ @Autowired
+ private JdbcEnhanceRepository jdbcEnhanceRepository;
+
+ public void insert100Times() throws SQLException, ClassNotFoundException {
+ jdbcEnhanceRepository.batchInsertOperation();
+ }
+
+ public void update100Times() throws SQLException, ClassNotFoundException {
+ jdbcEnhanceRepository.batchUpdateOperation();
+
+ }
+
+ public void delete100Times() throws SQLException, ClassNotFoundException {
+ jdbcEnhanceRepository.batchDeleteOperation();
+
+ }
+
+ public void query100Times() throws SQLException, ClassNotFoundException {
+ String updateSql = "select * from user where id = ?";
+ jdbcEnhanceRepository.batchQueryOperation(updateSql);
+ }
+}
+```
+
+#### 2.3 Controller层,测试接口及结果
+
+
+
+### 3 配置 Hikari 连接池,改进上述操作。
+
+#### 3.1 Repository层,定义JdbcPoolRepository数据库操作类
+
+> Springboot配置DataSource,默认会使用HikariDataSource,这也意味着连接池使用的是HikariPool,无需手动配置连接池。
+
+```java
+@Repository
+public class JdbcPoolRepository {
+
+ @Autowired
+ private DataSource dataSource;
+
+ @PostConstruct
+ public void setup() {
+ /*
+ 输出结果为:com.zaxxer.hikari.HikariDataSource
+ Springboot配置DataSource,默认会使用HikariDataSource,
+ 这也意味着连接池使用的是HikariPool,无需手动配置连接池
+ */
+ System.out.println(dataSource.getClass().getTypeName());
+ }
+
+ /*
+ 通过DataSource获取新的connection,是从连接池里面获取的,并非TCP握手、密码认证创建新连接。
+ 再创建新的statement,然后执行insert、delete、update操作
+ */
+ public int updateOperation(String sql) throws SQLException {
+ Connection connection = dataSource.getConnection();
+ Statement statement = connection.createStatement();
+ try {
+ return statement.executeUpdate(sql);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ throw e;
+ } finally {
+ JdbcUtil.closeResources(connection, statement);
+ }
+ }
+
+ /*
+ 通过DataSource获取新的connection,是从连接池里面获取的,并非TCP握手、密码认证创建新连接。
+ 再创建新的statement,然后执行query操作
+ */
+ public ResultSet queryOperation(String sql) throws SQLException {
+ Connection connection = dataSource.getConnection();
+ Statement statement = connection.createStatement();
+ try {
+ return statement.executeQuery(sql);
+ } catch (SQLException e) {
+ e.printStackTrace();
+ throw e;
+ } finally {
+ JdbcUtil.closeResources(connection, statement);
+ }
+ }
+
+}
+```
+
+#### 3.2 Service层,增删查改操作各100次
+
+```java
+@Service
+public class JdbcPoolService {
+
+ @Autowired
+ private JdbcPoolRepository jdbcPoolRepository;
+
+ public void insert100Times() throws SQLException {
+ for (int i = 1; i <= 100 ; i++) {
+ String id = String.valueOf(i);
+ String name = "user" + i;
+ String insertSql = "insert into user(id, name) values('%s', '%s')";
+ jdbcPoolRepository.updateOperation(String.format(insertSql, id, name));
+ }
+ }
+
+ public void update100Times() throws SQLException {
+ for (int i = 1; i <= 100 ; i++) {
+ String id = String.valueOf(i);
+ String updateSql = "update user set name = 'testName' where id = '%s'";
+ jdbcPoolRepository.updateOperation(String.format(updateSql, id));
+ }
+ }
+
+ public void delete100Times() throws SQLException {
+ for (int i = 1; i <= 100 ; i++) {
+ String id = String.valueOf(i);
+ String deleteSql = "delete from user where id = '%s'";
+ jdbcPoolRepository.updateOperation(String.format(deleteSql, id));
+ }
+ }
+
+ public void query100Times() throws SQLException {
+ for (int i = 1; i <= 100 ; i++) {
+ String id = String.valueOf(i);
+ String updateSql = "select * from user where id = '%s'";
+ jdbcPoolRepository.queryOperation(String.format(updateSql, id));
+ }
+ }
+}
+```
+
+#### 3.3 Controller层,测试接口及结果
+
+```java
+@RestController
+public class JdbcTestController {
+
+ @Autowired
+ private JdbcPoolService jdbcPoolService;
+
+ @GetMapping("test-jdbc-pool-update-method")
+ public void testJdbcPoolMethod() throws SQLException, ClassNotFoundException {
+ System.out.println("测试HikariDataSource:100次insert、100次update、100次delete");
+ long starTime = System.currentTimeMillis();
+
+ jdbcPoolService.insert100Times();
+ System.out.println("成功insert100次!");
+
+ jdbcPoolService.update100Times();
+ System.out.println("成功update100次!");
+
+ jdbcPoolService.delete100Times();
+ System.out.println("成功delete100次!");
+
+ long endTime = System.currentTimeMillis();
+ System.out.println("duration:"+(endTime - starTime));
+ }
+
+ @GetMapping("test-jdbc-pool-query-method")
+ public void testJdbcPoolQueryMethod() throws SQLException, ClassNotFoundException {
+ System.out.println("测试HikariDataSource:100次query");
+ long starTime = System.currentTimeMillis();
+
+ jdbcPoolService.query100Times();
+ System.out.println("成功query100次!");
+
+ long endTime = System.currentTimeMillis();
+ System.out.println("duration:"+(endTime - starTime));
+ }
+}
+
+结果:
+测试HikariDataSource:100次insert、100次update、100次delete
+成功insert100次
+成功update100次
+成功delete100次
+duration:20293ms
+
+测试HikariDataSource:100次query
+成功query100次
+duration:165ms
+```
\ No newline at end of file
diff --git a/04fx/week06/week06 homework.md b/04fx/week06/week06 homework.md
new file mode 100644
index 00000000..70b2ef24
--- /dev/null
+++ b/04fx/week06/week06 homework.md
@@ -0,0 +1,88 @@
+```sql
+##用户信息表
+CREATE TABLE `user` (
+ `user_id` varchar(15) NOT NULL COMMENT '用户id',
+ `user_name` varchar(15) NOT NULL DEFAULT '' COMMENT '用户名',
+ `first_name` varchar(15) DEFAULT '' COMMENT '用户真名',
+ `last_name` varchar(10) DEFAULT '' COMMENT '用户姓氏',
+ `nick_name` varchar(10) DEFAULT '' COMMENT '用户昵称',
+ `password` varchar(16) NOT NULL COMMENT '密码',
+ `phone_number` VARCHAR(11) NOT NULL COMMENT '电话号码',
+ `sex` int(1) NOT NULL COMMENT '性别',
+ `birthday` datetime NOT NULL COMMENT '生日',
+ `email` varchar(20) NOT NULL COMMENT '邮箱',
+ `primary_address` varchar(30) NOT NULL DEFAULT '' COMMENT '区位地址',
+ `detail_address` varchar(30) NOT NULL DEFAULT '' COMMENT '详细地址',
+ PRIMARY KEY (`user_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+```
+
+```sql
+##订单模块
+CREATE TABLE `order` (
+ `order_id` varchar(32) NOT NULL COMMENT '订单id',
+ `user_id` varchar(15) DEFAULT NULL COMMENT '用户id',
+ `amount` decimal(20,2) DEFAULT NULL COMMENT '付款金额',
+ `status` int(10) DEFAULT NULL COMMENT '订单状态',
+ `delivery_fee` int(10) DEFAULT NULL COMMENT '运费',
+ `order_time` datetime DEFAULT NULL COMMENT '生成时间',
+ `delivery_time` datetime DEFAULT NULL COMMENT '发货时间',
+ `complete_time` datetime DEFAULT NULL COMMENT '完成时间',
+ `close_time` datetime DEFAULT NULL COMMENT '交易关闭时间',
+ `modify_by` datetime DEFAULT NULL COMMENT '更改方',
+ `modify_time` datetime DEFAULT NULL COMMENT '更改时间',
+ PRIMARY KEY (`order_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+```
+
+```sql
+##交易支付信息
+CREATE TABLE `payment` (
+ `transaction_id` varchar(64) NOT NULL COMMENT '交易id',
+ `order_id` varchar(32) DEFAULT NULL COMMENT '订单id',
+ `user_id` varchar(15) DEFAULT NULL COMMENT '用户id',
+ `amount` decimal(20,2) DEFAULT NULL COMMENT '付款金额',
+ `currency` varchar(10) DEFAULT NULL COMMENT '币种',
+ `payer_id` varchar(15) DEFAULT NULL COMMENT '支付人id',
+ `payer_name` varchar(15) DEFAULT NULL COMMENT '支付人',
+ `payment_platform` int(2) DEFAULT NULL COMMENT '支付平台',
+ `payment_type` int(2) DEFAULT NULL COMMENT '支付类型,1-个人支付,2-企业支付',
+ `status` varchar(10) DEFAULT NULL COMMENT '支付状态',
+ `create_time` datetime DEFAULT NULL COMMENT '创建时间',
+ `update_time` datetime DEFAULT NULL COMMENT '更新时间',
+ PRIMARY KEY (`transaction_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+```
+
+```sql
+##商品信息
+CREATE TABLE `commodity` (
+ `commodity_id` varchar(32) NOT NULL COMMENT '商品id',
+ `commodity_name` varchar(100) NOT NULL COMMENT '商品名称',
+ `category_id` varchar(32) DEFAULT NULL COMMENT '类别编号',
+ `price` decimal(20,2) NOT NULL COMMENT '价格',
+ `discount` decimal(2,2) NOT NULL COMMENT '折扣',
+ `currency` varchar(10) DEFAULT NULL COMMENT '币种',
+ `stock` int(11) NOT NULL COMMENT '库存数量',
+ `status` int(6) DEFAULT '1' COMMENT '商品状态,1-在售 2-下架 3-删除',
+ `detail` text COMMENT '商品详情',
+ `image` varchar(1000) DEFAULT NULL COMMENT '图片base64编码',
+ `create_time` datetime DEFAULT NULL COMMENT '创建时间',
+ `update_time` datetime DEFAULT NULL COMMENT '更新时间',
+ PRIMARY KEY (`commodity_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+```
+
+```sql
+##商品类别
+CREATE TABLE `category` (
+ `category_id` varchar(32) NOT NULL COMMENT '类别Id',
+ `parent_category` varchar(32) DEFAULT NULL COMMENT '父类id',
+ `name` varchar(25) DEFAULT NULL COMMENT '类别名称',
+ `status` int(2) DEFAULT '1' COMMENT '状态',
+ `create_time` datetime DEFAULT NULL COMMENT '创建时间',
+ `update_time` datetime DEFAULT NULL COMMENT '更新时间',
+ PRIMARY KEY (`category_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+```
+
diff --git a/04fx/week07/Week 07.md b/04fx/week07/Week 07.md
new file mode 100644
index 00000000..855253af
--- /dev/null
+++ b/04fx/week07/Week 07.md
@@ -0,0 +1,11 @@
+## Week 07
+
+**2.(必做)**按自己设计的表结构,插入 100 万订单模拟数据,测试不同方式的插入效率
+
+
+
+**9.(必做)**读写分离 - 动态切换数据源版本 1.0
+
+
+
+**10.(必做)**读写分离 - 数据库框架版本 2.0
\ No newline at end of file
diff --git a/04fx/week08/homework week08.md b/04fx/week08/homework week08.md
new file mode 100644
index 00000000..4e7682e9
--- /dev/null
+++ b/04fx/week08/homework week08.md
@@ -0,0 +1,528 @@
+**2.(必做)**设计对前面的订单表数据进行水平分库分表,拆分 2 个库,每个库 16 张表。并在新结构在演示常见的增删改查操作
+
+####
+
+#### 1. 创建水平的两个数据库
+
+```sql
+##创建水平的两个数据库
+create database geek_0;
+create database geek_1;
+```
+
+####
+
+#### 2. 分别在geek_0库和geek_1库建表,分别建t_order_${0..15}
+
+```sql
+CREATE TABLE `geek_0`.`t_order_15` (
+ `order_id` varchar(32) NOT NULL COMMENT '订单id',
+ `user_id` varchar(15) DEFAULT NULL COMMENT '用户id',
+ `amount` decimal(20,2) DEFAULT NULL COMMENT '付款金额',
+ `status` int(10) DEFAULT NULL COMMENT '订单状态',
+ `delivery_fee` int(10) DEFAULT NULL COMMENT '运费',
+ `order_time` datetime DEFAULT NULL COMMENT '生成时间',
+ `delivery_time` datetime DEFAULT NULL COMMENT '发货时间',
+ `complete_time` datetime DEFAULT NULL COMMENT '完成时间',
+ `close_time` datetime DEFAULT NULL COMMENT '交易关闭时间',
+ `modify_by` datetime DEFAULT NULL COMMENT '更改方',
+ `modify_time` datetime DEFAULT NULL COMMENT '更改时间',
+ PRIMARY KEY (`order_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
+
+CREATE TABLE `geek_1`.`t_order_15` (
+ `order_id` varchar(32) NOT NULL COMMENT '订单id',
+ `user_id` varchar(15) DEFAULT NULL COMMENT '用户id',
+ `amount` decimal(20,2) DEFAULT NULL COMMENT '付款金额',
+ `status` int(10) DEFAULT NULL COMMENT '订单状态',
+ `delivery_fee` int(10) DEFAULT NULL COMMENT '运费',
+ `order_time` datetime DEFAULT NULL COMMENT '生成时间',
+ `delivery_time` datetime DEFAULT NULL COMMENT '发货时间',
+ `complete_time` datetime DEFAULT NULL COMMENT '完成时间',
+ `close_time` datetime DEFAULT NULL COMMENT '交易关闭时间',
+ `modify_by` datetime DEFAULT NULL COMMENT '更改方',
+ `modify_time` datetime DEFAULT NULL COMMENT '更改时间',
+ PRIMARY KEY (`order_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
+```
+
+####
+
+#### 3. sharding配置
+
+```sql
+##sharding数据源配置
+dataSources:
+ ds_0:
+ url: jdbc:mysql://127.0.0.1:3306/geek_0?serverTimezone=UTC&useSSL=false
+ ds_1:
+ url: jdbc:mysql://127.0.0.1:3306/geek_1?serverTimezone=UTC&useSSL=false
+
+##规则配置
+rules:
+ tables:
+ t_order:
+ actualDataNodes: ds_${0..1}.t_order_${0..15}
+ tableStrategy:
+ standard:
+ shardingColumn: order_id
+ shardingAlgorithmName: t_order_inline
+ keyGenerateStrategy:
+ column: order_id
+ keyGeneratorName: snowflake
+
+ defaultDatabaseStrategy:
+ standard:
+ shardingColumn: user_id
+ shardingAlgorithmName: database_inline
+ defaultTableStrategy:
+ none:
+
+ shardingAlgorithms:
+ database_inline:
+ type: INLINE
+ props:
+ algorithm-expression: ds_${user_id % 2}
+ t_order_inline:
+ type: INLINE
+ props:
+ algorithm-expression: t_order_${order_id % 16}
+
+ keyGenerators:
+ snowflake:
+ type: SNOWFLAKE
+ props:
+ worker-id: 123
+```
+
+####
+
+#### 4. 测试
+
+> Windows通过mysql -h 127.0.0.1 -P 3307 -u root -p命令连接到ShardingSphere的虚拟数据库服务。
+
+#####
+
+##### 4.1 show tables;
+
+```sql
+show tables;
+```
+
+ShardingSphere-SQL - Actual SQL: ds_0 ::: show tables ShardingSphere-SQL - Actual SQL: ds_1 ::: show tables
+
+#####
+
+##### 4.2 插入操作
+
+```sql
+insert into t_order(user_id, status) values(1, 0),(1, 1);
+```
+
+ShardingSphere-SQL - Actual SQL: ds_1 ::: insert into t_order_0(user_id, status, order_id) values(1, 0, 603737938735902720)
+ShardingSphere-SQL - Actual SQL: ds_1 ::: insert into t_order_1(user_id, status, order_id) values(1, 1, 603737938735902721)
+
+```sql
+insert into t_order(user_id, status) values(2, 0),(2, 1);
+```
+
+ShardingSphere-SQL - Actual SQL: ds_0 ::: insert into t_order_1(user_id, status, order_id) values(2, 0, 603738400600076289)
+ShardingSphere-SQL - Actual SQL: ds_0 ::: insert into t_order_2(user_id, status, order_id) values(2, 1, 603738400600076290)
+
+#####
+
+##### 4.3 查询操作
+
+```
+select * from t_order;
+```
+
+ShardingSphere-SQL - Actual SQL: ds_0 ::: select * from t_order_0
+ShardingSphere-SQL - Actual SQL: ds_0 ::: select * from t_order_1
+ShardingSphere-SQL - Actual SQL: ds_0 ::: select * from t_order_2
+ShardingSphere-SQL - Actual SQL: ds_0 ::: select * from t_order_3
+ShardingSphere-SQL - Actual SQL: ds_0 ::: select * from t_order_4
+ShardingSphere-SQL - Actual SQL: ds_0 ::: select * from t_order_5
+ShardingSphere-SQL - Actual SQL: ds_0 ::: select * from t_order_6
+ShardingSphere-SQL - Actual SQL: ds_0 ::: select * from t_order_7
+ShardingSphere-SQL - Actual SQL: ds_0 ::: select * from t_order_8
+ShardingSphere-SQL - Actual SQL: ds_0 ::: select * from t_order_9
+ShardingSphere-SQL - Actual SQL: ds_0 ::: select * from t_order_10
+ShardingSphere-SQL - Actual SQL: ds_0 ::: select * from t_order_11
+ShardingSphere-SQL - Actual SQL: ds_0 ::: select * from t_order_12
+ShardingSphere-SQL - Actual SQL: ds_0 ::: select * from t_order_13
+ShardingSphere-SQL - Actual SQL: ds_0 ::: select * from t_order_14
+ShardingSphere-SQL - Actual SQL: ds_0 ::: select * from t_order_15
+
+ShardingSphere-SQL - Actual SQL: ds_1 ::: select * from t_order_0
+ShardingSphere-SQL - Actual SQL: ds_1 ::: select * from t_order_1
+ShardingSphere-SQL - Actual SQL: ds_1 ::: select * from t_order_2
+ShardingSphere-SQL - Actual SQL: ds_1 ::: select * from t_order_3
+ShardingSphere-SQL - Actual SQL: ds_1 ::: select * from t_order_4
+ShardingSphere-SQL - Actual SQL: ds_1 ::: select * from t_order_5
+ShardingSphere-SQL - Actual SQL: ds_1 ::: select * from t_order_6
+ShardingSphere-SQL - Actual SQL: ds_1 ::: select * from t_order_7
+ShardingSphere-SQL - Actual SQL: ds_1 ::: select * from t_order_8
+ShardingSphere-SQL - Actual SQL: ds_1 ::: select * from t_order_9
+ShardingSphere-SQL - Actual SQL: ds_1 ::: select * from t_order_10
+ShardingSphere-SQL - Actual SQL: ds_1 ::: select * from t_order_11
+ShardingSphere-SQL - Actual SQL: ds_1 ::: select * from t_order_12
+ShardingSphere-SQL - Actual SQL: ds_1 ::: select * from t_order_13
+ShardingSphere-SQL - Actual SQL: ds_1 ::: select * from t_order_14
+ShardingSphere-SQL - Actual SQL: ds_1 ::: select * from t_order_15
+
+```
+select * from t_order where order_id = 603738400600076289;
+```
+
+ShardingSphere-SQL - Actual SQL: ds_0 ::: select * from t_order_1 where order_id = 603738400600076289
+ShardingSphere-SQL - Actual SQL: ds_1 ::: select * from t_order_1 where order_id = 603738400600076289
+
+#####
+
+##### 4.4 修改操作
+
+```
+update t_order set status = 2 where user_id = 2;
+```
+
+ShardingSphere-SQL - Actual SQL: ds_0 ::: update t_order_0 set status = 2 where user_id = 2
+ShardingSphere-SQL - Actual SQL: ds_0 ::: update t_order_1 set status = 2 where user_id = 2
+ShardingSphere-SQL - Actual SQL: ds_0 ::: update t_order_2 set status = 2 where user_id = 2
+ShardingSphere-SQL - Actual SQL: ds_0 ::: update t_order_3 set status = 2 where user_id = 2
+ShardingSphere-SQL - Actual SQL: ds_0 ::: update t_order_4 set status = 2 where user_id = 2
+ShardingSphere-SQL - Actual SQL: ds_0 ::: update t_order_5 set status = 2 where user_id = 2
+ShardingSphere-SQL - Actual SQL: ds_0 ::: update t_order_6 set status = 2 where user_id = 2
+ShardingSphere-SQL - Actual SQL: ds_0 ::: update t_order_7 set status = 2 where user_id = 2
+ShardingSphere-SQL - Actual SQL: ds_0 ::: update t_order_8 set status = 2 where user_id = 2
+ShardingSphere-SQL - Actual SQL: ds_0 ::: update t_order_9 set status = 2 where user_id = 2
+ShardingSphere-SQL - Actual SQL: ds_0 ::: update t_order_10 set status = 2 where user_id = 2
+ShardingSphere-SQL - Actual SQL: ds_0 ::: update t_order_11 set status = 2 where user_id = 2
+ShardingSphere-SQL - Actual SQL: ds_0 ::: update t_order_12 set status = 2 where user_id = 2
+ShardingSphere-SQL - Actual SQL: ds_0 ::: update t_order_13 set status = 2 where user_id = 2
+ShardingSphere-SQL - Actual SQL: ds_0 ::: update t_order_14 set status = 2 where user_id = 2
+ShardingSphere-SQL - Actual SQL: ds_0 ::: update t_order_15 set status = 2 where user_id = 2
+
+#####
+
+##### 4.5 删除操作
+
+```
+delete from t_order where order_id = 603738400600076289;
+```
+
+ShardingSphere-SQL - Actual SQL: ds_0 ::: delete from t_order_1 where order_id = 603738400600076289 ShardingSphere-SQL - Actual SQL: ds_1 ::: delete from t_order_1 where order_id = 603738400600076289
+
+------
+
+**6.(必做)**基于 hmily TCC 或 ShardingSphere 的 Atomikos XA 实现一个简单的分布式事务应用 demo(二选一)
+
+基于hmily TCC实现分布式事务
+
+#### 1.微服务和数据库
+
+分别创建三个服务:订单服务——order_service、账户服务——account_service、库存服务——inventory_service,并注册到zookeeper
+
+```xml
+
+
+
+
+
+
+```
+
+```xml
+
+
+
+
+
+
+
+```
+
+```xml
+
+
+
+
+
+
+```
+
+
+分别创建三个服务对应的数据库:hmily_order、hmily_account、hmily_inventory
+
+```sql
+CREATE DATABASE IF NOT EXISTS `hmily_account` DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_bin ;
+
+USE `hmily_account`;
+
+CREATE TABLE `account` (
+ `id` bigint(20) NOT NULL AUTO_INCREMENT,
+ `user_id` varchar(128) NOT NULL,
+ `balance` decimal(10,0) NOT NULL COMMENT '用户余额',
+ `freeze_amount` decimal(10,0) NOT NULL COMMENT '冻结金额,扣款暂存余额',
+ `create_time` datetime NOT NULL,
+ `update_time` datetime DEFAULT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
+
+insert into `account`(`id`,`user_id`,`balance`,`freeze_amount`,`create_time`,`update_time`) values
+(1,'10000', 10000000,0,'2017-09-18 14:54:22',NULL);
+
+CREATE DATABASE IF NOT EXISTS `hmily_inventory` DEFAULT CHARACTER SET utf8mb4;
+
+USE `hmily_inventory`;
+
+CREATE TABLE `inventory` (
+ `id` bigint(20) NOT NULL AUTO_INCREMENT,
+ `product_id` VARCHAR(128) NOT NULL,
+ `total_inventory` int(10) NOT NULL COMMENT '总库存',
+ `lock_inventory` int(10) NOT NULL COMMENT '锁定库存',
+ PRIMARY KEY (`id`)
+) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
+
+insert into `inventory`(`id`,`product_id`,`total_inventory`,`lock_inventory`) values
+(1,'1',10000000,0);
+
+CREATE DATABASE IF NOT EXISTS `hmily_order` DEFAULT CHARACTER SET utf8mb4;
+
+USE `hmily_order`;
+
+CREATE TABLE `order` (
+ `id` bigint(20) NOT NULL AUTO_INCREMENT,
+ `create_time` datetime NOT NULL,
+ `number` varchar(20) COLLATE utf8mb4_bin NOT NULL,
+ `status` tinyint(4) NOT NULL,
+ `product_id` varchar(128) NOT NULL,
+ `total_amount` decimal(10,0) NOT NULL,
+ `count` int(4) NOT NULL,
+ `user_id` varchar(128) NOT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
+```
+
+#### 2.订单逻辑实现
+
+##### 2.1 在OrderController暴露Restful的API供外部调用
+
+```java
+@RestController
+@RequestMapping("/order")
+public class OrderController {
+
+ private final OrderService orderService;
+
+ @Autowired
+ public OrderController(OrderService orderService) {
+ this.orderService = orderService;
+ }
+
+ @PostMapping(value = "/orderPay")
+ @ApiOperation(value = "订单支付接口(创建订单并进行支付扣减库存等操作)")
+ public String orderPay(@RequestParam(value = "count") Integer count,
+ @RequestParam(value = "amount") BigDecimal amount) {
+ final long start = System.currentTimeMillis();
+ orderService.orderPay(count, amount);
+ System.out.println("消耗时间为:" + (System.currentTimeMillis() - start));
+ return "";
+ }
+}
+```
+
+##### 2.2 OrderService实现订单创建逻辑,并通过rpc调用account服务以及inventory服务完成扣款和减库存的逻辑
+
+```java
+@Override
+public String orderPay(Integer count, BigDecimal amount) {
+ Order order = saveOrder(count, amount);
+ long start = System.currentTimeMillis();
+ paymentService.makePayment(order);
+ System.out.println("切面耗时:" + (System.currentTimeMillis() - start));
+ return "success";
+}
+```
+
+##### 2.3 在需要分布式事务控制的方法中开启hmily TCC事务
+
+此为order服务中的try阶段,除了对自身hmily_order数据库的操作,还涉及两个远程调用
+
+```java
+@Override
+@HmilyTCC(confirmMethod = "confirmOrderStatus", cancelMethod = "cancelOrderStatus")
+public void makePayment(Order order) {
+ updateOrderStatus(order, OrderStatusEnum.PAYING);
+
+ //扣除用户余额
+ accountService.payment(buildAccountDTO(order));
+ //进入扣减库存操作
+ inventoryService.decrease(buildInventoryDTO(order));
+}
+```
+
+定义confirmMethod和cancelMethod
+
+```java
+public void confirmOrderStatus(Order order) {
+ updateOrderStatus(order, OrderStatusEnum.PAY_SUCCESS);
+ LOGGER.info("=========进行订单confirm操作完成================");
+}
+
+public void cancelOrderStatus(Order order) {
+ updateOrderStatus(order, OrderStatusEnum.PAY_FAIL);
+ LOGGER.info("=========进行订单cancel操作完成================");
+}
+```
+
+##### 2.4 account服务中实现扣款操作
+
+同样需要hmily TCC事务进行控制
+
+```java
+@Override
+@HmilyTCC(confirmMethod = "confirm", cancelMethod = "cancel")
+public boolean payment(AccountDTO accountDTO) {
+ int count = accountMapper.update(accountDTO);
+ if (count > 0) {
+ return true;
+ } else {
+ throw new HmilyRuntimeException("账户扣减异常!");
+ }
+}
+```
+
+try阶段金额预留的方法
+
+```java
+@Update("update account set balance = balance - #{amount}," +
+ " freeze_amount= freeze_amount + #{amount} ,update_time = now()" +
+ " where user_id =#{userId} and balance >= #{amount} ")
+int update(AccountDTO accountDTO);
+```
+
+定义confirmMethod和cancelMethod
+
+```java
+@Transactional(rollbackFor = Exception.class)
+public boolean confirm(AccountDTO accountDTO) {
+ LOGGER.info("============dubbo tcc 执行确认付款接口===============");
+ accountMapper.confirm(accountDTO);
+ final int i = confrimCount.incrementAndGet();
+ LOGGER.info("调用了account confirm " + i + " 次");
+ return Boolean.TRUE;
+}
+
+@Transactional(rollbackFor = Exception.class)
+public boolean cancel(AccountDTO accountDTO) {
+ LOGGER.info("============ dubbo tcc 执行取消付款接口===============");
+ final AccountDO accountDO = accountMapper.findByUserId(accountDTO.getUserId());
+ accountMapper.cancel(accountDTO);
+ return Boolean.TRUE;
+}
+```
+
+confirm阶段和cancel阶段对应的真实(将freeze amount真实扣除)扣款和补偿的方法(将freeze amount补偿回balance)
+
+```java
+@Update("update account set " +
+ " freeze_amount= freeze_amount - #{amount}" +
+ " where user_id =#{userId} and freeze_amount >= #{amount} ")
+int confirm(AccountDTO accountDTO);
+
+@Update("update account set balance = balance + #{amount}," +
+ " freeze_amount= freeze_amount - #{amount} " +
+ " where user_id =#{userId} and freeze_amount >= #{amount}")
+int cancel(AccountDTO accountDTO);
+```
+
+##### 2.5 inventory服务实现减库存操作
+
+开启hmily TCC事务
+
+```java
+@Override
+@HmilyTCC(confirmMethod = "confirmMethod", cancelMethod = "cancelMethod")
+public Boolean decrease(InventoryDTO inventoryDTO) {
+ return inventoryMapper.decrease(inventoryDTO) > 0;
+}
+```
+
+try阶段预先减库存的操作,将需要减去的库存转入lock_inventory进行预留
+
+```java
+@Update("update inventory set total_inventory = total_inventory - #{count}," +
+ " lock_inventory= lock_inventory + #{count} " +
+ " where product_id =#{productId} and total_inventory > 0 ")
+int decrease(InventoryDTO inventoryDTO);
+```
+
+定义confirmMethod和cancelMethod
+
+```java
+public Boolean confirmMethod(InventoryDTO inventoryDTO) {
+ LOGGER.info("==========调用扣减库存confirm方法===========");
+ inventoryMapper.confirm(inventoryDTO);
+ final int i = confirmCount.incrementAndGet();
+ LOGGER.info("调用了inventory confirm " + i + " 次");
+ return true;
+}
+
+public Boolean cancelMethod(InventoryDTO inventoryDTO) {
+ LOGGER.info("==========调用扣减库存取消方法===========");
+ inventoryMapper.cancel(inventoryDTO);
+ return true;
+}
+```
+
+真实减库存或补偿库存的方法
+
+```java
+@Update("update inventory set " +
+ " lock_inventory = lock_inventory - #{count} " +
+ " where product_id =#{productId} and lock_inventory > 0 ")
+int confirm(InventoryDTO inventoryDTO);
+
+@Update("update inventory set total_inventory = total_inventory + #{count}," +
+ " lock_inventory= lock_inventory - #{count} " +
+ " where product_id =#{productId} and lock_inventory > 0 ")
+int cancel(InventoryDTO inventoryDTO);
+```
+
+##### 2.6 模拟其中一个服务的事务失败,并回滚
+
+```java
+@Transactional(rollbackFor = Exception.class)
+public Boolean confirmMethodTimeout(InventoryDTO inventoryDTO) {
+ try {
+ //模拟延迟 当前线程暂停11秒
+ Thread.sleep(11000);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ LOGGER.info("==========调用扣减库存确认方法===========");
+ inventoryMapper.confirm(inventoryDTO);
+ return true;
+}
+```
\ No newline at end of file
diff --git a/07rpc/week09/week09 homework.md b/07rpc/week09/week09 homework.md
new file mode 100644
index 00000000..ae202dfc
--- /dev/null
+++ b/07rpc/week09/week09 homework.md
@@ -0,0 +1,13 @@
+**3.(必做)**改造自定义 RPC 的程序,提交到 GitHub:
+
+- 尝试将服务端写死查找接口实现类变成泛型和反射;
+- 尝试将客户端动态代理改成 AOP,添加异常处理;
+- 尝试使用 Netty+HTTP 作为 client 端传输方式。
+
+***
+
+**7.(必做)**结合 dubbo+hmily,实现一个 TCC 外汇交易处理,代码提交到 GitHub:
+
+- 用户 A 的美元账户和人民币账户都在 A 库,使用 1 美元兑换 7 人民币 ;
+- 用户 B 的美元账户和人民币账户都在 B 库,使用 7 人民币兑换 1 美元 ;
+- 设计账户表,冻结资产表,实现上述两个本地事务的分布式事务。
\ No newline at end of file