JAVA

Spring boot 2.0 + JPA 配置多数据源(非分布

2018-10-31  本文已影响0人  风中吃西瓜

在大型应用中对数据进行切分就会采用多个数据库实例进行管理,这样可以有效提高系统的水平伸缩性。而这样的方案就会不同于常见的单一数据实例的方案,这就要程序在运行时根据当时的请求及系统状态来动态的决定将数据存储在哪个数据库实例中,以及从哪个数据库提取数据。

多数据源,以数据库表为参照,大体上可以分成两大类情况:
一是,表级上的跨数据库。即,对于不同的数据库却有相同的表(表名和表结构完全相同)。
二是,非表级上的跨数据库。即,多个数据源不存在相同的表。
根据用户的选择,使用不同的数据源。

  1. 主数据源(默认第一数据源)事务正常,当切换到第二数据源时事务还是使用的第一数据源事务,导致切换第二数据源失败。例如:对第一数据源进行删除操作,对第二数据源进行插入操作,此时结果则是,全部都是使用的第一数据源事务,操作的第一数据源的数据库。解决方法:使用分布式事务 spring-boot-starter-jta-atomikos
  2. 在一个方法中调用service类外一个方法时,被调用的方法无法触发AOP进行数据源切换 解决方法如下:
//this.insert(record); // 内嵌方法 这种方式直接调用无法触发AOP 切换数据源

AttachmentUploadingRecordService serviceTemp = applicationContext.getBean(AttachmentUploadingRecordServiceImpl.class);
 serviceTemp.insert(record); //正确的切换到 insert()上配置的数据源

下面直接上代码,切换数据源的时候如果遇到事务问题,需要仔细检查事务配置

POM.xml

<dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-jpa</artifactId>
            <exclusions>
                <!-- 排除默认日志插件 -->
                <exclusion>
                    <groupId>org.springframework.boot</groupId>
                    <artifactId>spring-boot-starter-logging</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
            <exclusions>
                <!-- 排除默认日志插件 -->
                <exclusion>
                    <groupId>org.springframework.boot</groupId>
                    <artifactId>spring-boot-starter-logging</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-configuration-processor</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <scope>runtime</scope>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <optional>true</optional>
        </dependency>
        <!-- druid 连接池 -->
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>druid</artifactId>
            <version>1.1.11</version>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>druid-spring-boot-starter</artifactId>
            <version>1.1.10</version>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.49</version>
        </dependency>
        <!-- 使用log4j2 日志 -->
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-log4j2</artifactId>
        </dependency>
 <!--       <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
            <version>1.7.25</version>
            <scope>test</scope>
        </dependency>-->
        <!-- Log4j2 异步支持 -->
        <dependency>
            <groupId>com.lmax</groupId>
            <artifactId>disruptor</artifactId>
            <version>3.4.2</version>
        </dependency>
        <!-- sql 日志待参数输出 -->
        <dependency>
            <groupId>com.googlecode.log4jdbc</groupId>
            <artifactId>log4jdbc</artifactId>
            <version>1.2</version>
        </dependency>




        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
            <exclusions>
                <!-- 排除默认日志插件 -->
                <exclusion>
                    <groupId>org.springframework.boot</groupId>
                    <artifactId>spring-boot-starter-logging</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
    </dependencies>

application.yml

spring:
  datasource:
    # driver-class-name: com.mysql.jdbc.Driver   # 使用mysql驱动
    driver-class-name: net.sf.log4jdbc.DriverSpy  # 使用 log4jdbc sql日志会携带参数
    type: com.alibaba.druid.pool.DruidDataSource

    # 主库
    master:
      #url: jdbc:mysql://127.0.0.1:3307/jwwl_attachment?useUnicode=true&characterEncoding=utf8
      url: jdbc:log4jdbc:mysql://127.0.0.1:3306/jwwl_attachment?useUnicode=true&characterEncoding=utf8
      username: root
      password: root

    # 从库1数据源
    slave1:
      #url: jdbc:mysql://127.0.0.1:3306/jwwl_attachment?useUnicode=true&characterEncoding=utf8
      url: jdbc:log4jdbc:mysql://127.0.0.1:3307/jwwl_attachment?useUnicode=true&characterEncoding=utf8
      username: root
      password: root

    # 从库2数据源
    slave2:
      #url: jdbc:mysql://127.0.0.1:3307/jwwl_attachment?useUnicode=true&characterEncoding=utf8
      url: jdbc:log4jdbc:mysql://127.0.0.1:3307/jwwl_attachment?useUnicode=true&characterEncoding=utf8
      username: root
      password: root

    # 连接池的配置信息
    # 初始化大小,最小,最大
    initialSize: 8
    minIdle: 5
    maxActive: 20
    # 配置获取连接等待超时的时间
    maxWait: 60000
    # 配置间隔多久才进行一次检测,检测需要关闭的空闲连接,单位是毫秒
    timeBetweenEvictionRunsMillis: 60000
    # 配置一个连接在池中最小生存的时间,单位是毫秒
    minEvictableIdleTimeMillis: 30000
    validationQuery: SELECT 1 FROM DUAL
    testWhileIdle: true
    testOnBorrow: false
    testOnReturn: false
    # 打开PSCache,并且指定每个连接上PSCache的大小
    poolPreparedStatements: false
    maxPoolPreparedStatementPerConnectionSize: 20
    # 配置监控统计拦截的filters,去掉后监控界面sql无法统计,'wall'用于防火墙
    filters: stat,wall,slf4j
    # 通过connectProperties属性来打开mergeSql功能;慢SQL记录
    connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000,
    stat-view-servlet:
      allow: 127.0.0.1  # IP 白名单
      deny: 192.168.0.10 #  IP黑名单(共同存在时,deny优先于allow)
      enabled: true
      login-password: admin  # 控制台用户名和密码
      login-username: admin
      reset-enable: false
    web-stat-filter:
      enabled: true
      exclusions: '*.js,*.gif,*.jpg,*.png,*.css,*.ico,/druid/*'
    # 合并多个DruidDataSource的监控数据
    useGlobalDataSourceStat: true


  jpa:
    database: mysql
    generate-ddl: true
    show-sql: true
    database-platform: org.hibernate.dialect.MySQL5Dialect
    hibernate:
      ddl-auto: update
      # update:表示自动根据model对象来更新表结构,启动 hibernate 时会自动检查数据库,如果缺少表则自动建表;缺少列则自动添加列;
      # create: 启动hibernate时,自动删除原来的表,新建所有的表,所以每次启动后的以前数据都会丢失。
      # create-drop:应用停下来的时候,自动会把表和数据删掉、
      # none: 什么也不做;
      # validate:会验证类里的属性和表字段是否一致,不一致,则会报错;
      naming:
        physical-strategy: org.springframework.boot.orm.jpa.hibernate.SpringPhysicalNamingStrategy

TargetDataSource 创建目标数据源注解 拦截数据源的注解,可以设置在具体的类上,或者在具体的方法上

import java.lang.annotation.*;

/***
 *   Target data-source annotation: placed on a method (or on a class) to name
 *   the data source that should serve it. The {@code dataSource} value is the
 *   routing key (an alias) identifying the data source; the AOP aspect reads
 *   it and binds the key to the current thread before the method runs.
 */
@Target({ElementType.METHOD, ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
@Documented
public @interface TargetDataSource {
    // Routing key; defaults to the master data-source bean name.
    String dataSource() default DataSourceType.MASTER;
}

配置Druid

import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.support.http.StatViewServlet;
import com.alibaba.druid.support.http.WebStatFilter;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.web.servlet.FilterRegistrationBean;
import org.springframework.boot.web.servlet.ServletRegistrationBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.orm.jpa.JpaTransactionManager;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.transaction.annotation.EnableTransactionManagement;

import javax.persistence.EntityManagerFactory;
import javax.sql.DataSource;
import java.sql.SQLException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/***
 *  Druid configuration: registers the Druid monitoring servlet/filter, builds
 *  the master/slave DruidDataSource pools, wraps them in a routing data source
 *  (so the active source can be switched per thread) and declares the
 *  transaction managers.
 */
@Slf4j
@Configuration
@EnableTransactionManagement(proxyTargetClass = true) // enable annotation-driven transaction management
@Data
public class DruidConfiguration {

    @Value("${spring.datasource.stat-view-servlet.allow}")
    private String servletAllow;
    @Value("${spring.datasource.stat-view-servlet.deny}")
    private String servletDeny;
    @Value("${spring.datasource.stat-view-servlet.enabled}")
    private Boolean servletEnabled;
    @Value("${spring.datasource.stat-view-servlet.login-password}")
    private String servletPassword;
    @Value("${spring.datasource.stat-view-servlet.login-username}")
    private String servletUsername;
    @Value("${spring.datasource.stat-view-servlet.reset-enable}")
    private String servletResetEnable;
    @Value("${spring.datasource.web-stat-filter.exclusions}")
    private String webStatExclusions;
    @Value("${spring.datasource.web-stat-filter.enabled}")
    private Boolean webStatEnabled;


    /**
     * Registers the Druid monitoring console servlet under /druid/*.
     *
     * @return servlet registration for Druid's StatViewServlet
     */
    @Bean
    public ServletRegistrationBean druidServlet() {
        log.info("init Druid Servlet Configuration ");
        ServletRegistrationBean servletRegistrationBean = new ServletRegistrationBean(new StatViewServlet(), "/druid/*");
        // IP whitelist
        servletRegistrationBean.addInitParameter("allow", servletAllow);
        // IP blacklist (when both are present, deny wins over allow)
        servletRegistrationBean.addInitParameter("deny", servletDeny);
        // console login credentials
        servletRegistrationBean.addInitParameter("loginUsername", servletUsername);
        servletRegistrationBean.addInitParameter("loginPassword", servletPassword);
        // whether the "Reset All" button on the HTML console is enabled
        servletRegistrationBean.addInitParameter("resetEnable", servletResetEnable);
        return servletRegistrationBean;
    }

    /**
     * Registers Druid's web-stat filter on all URLs.
     *
     * @return filter registration for Druid's WebStatFilter
     */
    @Bean
    public FilterRegistrationBean filterRegistrationBean() {
        FilterRegistrationBean filterRegistrationBean = new FilterRegistrationBean(new WebStatFilter());
        filterRegistrationBean.addUrlPatterns("/*");
        // requests the filter should ignore
        filterRegistrationBean.addInitParameter("exclusions", webStatExclusions);
        return filterRegistrationBean;
    }


    @Data
    @ConfigurationProperties(prefix = "spring.datasource")
    class DruidDataSourceProperties {

        // master url
        @Value("${spring.datasource.master.url}")
        private String masterUrl;
        // master user
        @Value("${spring.datasource.master.username}")
        private String masterUsername;
        // master password
        @Value("${spring.datasource.master.password}")
        private String masterPassword;

        // slave 1 url
        @Value("${spring.datasource.slave1.url}")
        private String slaveOneUrl;
        // slave 1 user
        @Value("${spring.datasource.slave1.username}")
        private String slaveOneUsername;
        // slave 1 password
        @Value("${spring.datasource.slave1.password}")
        private String slaveOnePassword;

        // slave 2 url
        @Value("${spring.datasource.slave2.url}")
        private String slaveTwoUrl;
        // slave 2 user
        @Value("${spring.datasource.slave2.username}")
        private String slaveTwoUsername;
        // slave 2 password
        @Value("${spring.datasource.slave2.password}")
        private String slaveTwoPassword;

        private String driverClassName;
        private Integer initialSize;
        private Integer minIdle;
        private Integer maxActive;
        private Integer maxWait;
        private Integer timeBetweenEvictionRunsMillis;
        private Integer minEvictableIdleTimeMillis;
        private String validationQuery;
        private Boolean testWhileIdle;
        private Boolean testOnBorrow;
        private Boolean testOnReturn;
        private Boolean poolPreparedStatements;
        private Integer maxPoolPreparedStatementPerConnectionSize;
        private String filters;
        private String connectionProperties;
        private Boolean useGlobalDataSourceStat;

        /**
         *  Master data-source bean.
         *
         * @return pooled master data source
         */
        @Bean(value = "masterDataSource", destroyMethod = "close")
        public DataSource masterDataSource() {
            return getDataSource(masterUrl, masterUsername, masterPassword);
        }

        /**
         *  Slave 1 data-source bean.
         *
         * @return pooled slave-1 data source
         */
        @Bean(value = "slaveOneDataSource", destroyMethod = "close")
        public DataSource slaveOneDataSource() {
            return getDataSource(slaveOneUrl, slaveOneUsername, slaveOnePassword);
        }

        /**
         *  Slave 2 data-source bean.
         *
         * @return pooled slave-2 data source
         */
        @Bean(value = "slaveTwoDataSource", destroyMethod = "close")
        public DataSource slaveTwoDataSource() {
            return getDataSource(slaveTwoUrl, slaveTwoUsername, slaveTwoPassword);
        }

        /**
         * All slave data sources, keyed by routing key.
         *
         * @return map of slave routing key -> data source
         */
        @Bean(value = "slaveDataSource")
        public ConcurrentHashMap<String, DataSource> slaveDataSources() {
            ConcurrentHashMap<String, DataSource> dataSources = new ConcurrentHashMap<>();
            dataSources.put(DataSourceType.SLAVE_0, slaveOneDataSource());
            dataSources.put(DataSourceType.SLAVE_1, slaveTwoDataSource());
            return dataSources;
        }

        /**
         * Wraps every data source in an AbstractRoutingDataSource so the
         * active one can be selected per thread.
         *
         * @return the primary routing data source
         */
        @Bean(name = "dataSource")
        @Primary
        public MultipleDataSourceRouting dataSource() throws SQLException {
            // Map of routing key -> target data source.
            Map<String, DataSource> targetDataSources = new ConcurrentHashMap<>();
            targetDataSources.put(DataSourceType.MASTER, masterDataSource());
            // NOTE(review): these direct calls to sibling @Bean methods create new
            // pool instances unless Spring CGLIB-proxies this (inner, non-static)
            // class — verify the DruidDataSource beans are not duplicated.
            slaveDataSources().forEach(targetDataSources::put);
            // AbstractRoutingDataSource subclass wraps the whole set; the master
            // is the default when no key is bound to the thread.
            return new MultipleDataSourceRouting(masterDataSource(), targetDataSources);
        }


        /**
         *  JDBC transaction manager bound to the routing data source.
         *
         * @param dataSource the @Primary routing data source, injected by Spring
         * @return transaction manager for plain DataSource transactions
         */
        @Bean
        @Primary
        public PlatformTransactionManager transactionManager(DataSource dataSource) throws SQLException {
            // BUGFIX: the original overwrote the injected parameter with a fresh
            // dataSource() call, building a second, unmanaged routing data source.
            // Use the @Primary bean Spring injects instead.
            return new DataSourceTransactionManager(dataSource);
        }

        /**
         * JPA transaction manager.
         *
         * @param entityManagerFactory the JPA entity manager factory
         * @return transaction manager for JPA transactions
         */
        @Bean
        public PlatformTransactionManager txManager(EntityManagerFactory entityManagerFactory) throws SQLException {
            return new JpaTransactionManager(entityManagerFactory);
        }


        /**
         *   Builds one fully-configured DruidDataSource.
         *
         * @param url      JDBC url
         * @param username database user
         * @param password database password
         * @return pooled data source
         */
        public DataSource getDataSource(String url, String username, String password) {
            DruidDataSource datasource = new DruidDataSource();
            datasource.setUrl(url);
            datasource.setUsername(username);
            datasource.setPassword(password);
            datasource.setDriverClassName(driverClassName);

            // pool configuration
            datasource.setInitialSize(initialSize);
            datasource.setMinIdle(minIdle);
            datasource.setMaxActive(maxActive);
            datasource.setMaxWait(maxWait);
            datasource.setTimeBetweenEvictionRunsMillis(timeBetweenEvictionRunsMillis);
            datasource.setMinEvictableIdleTimeMillis(minEvictableIdleTimeMillis);
            datasource.setValidationQuery(validationQuery);
            datasource.setTestWhileIdle(testWhileIdle);
            datasource.setTestOnBorrow(testOnBorrow);
            datasource.setTestOnReturn(testOnReturn);
            datasource.setPoolPreparedStatements(poolPreparedStatements);
            datasource.setMaxPoolPreparedStatementPerConnectionSize(maxPoolPreparedStatementPerConnectionSize);
            datasource.setUseGlobalDataSourceStat(useGlobalDataSourceStat);
            try {
                datasource.setFilters(filters);
            } catch (SQLException e) {
                // best-effort: keep starting up even if a filter name is bad,
                // but log the full cause (printStackTrace removed).
                log.error("druid configuration initialization filter: " + e, e);
            }
            datasource.setConnectionProperties(connectionProperties);
            return datasource;
        }

    }

}

DataSourceType 数据源类型枚举


import java.util.concurrent.CopyOnWriteArrayList;

/***
 *  DataSourceType — data-source roles plus the bean-name keys the routing
 *  layer uses to look up concrete data sources.
 */
public enum DataSourceType {

    READ("SLAVE", "从库"),
    WRITE("MASTER", "主库");

    public static final  String SLAVE = "slaveDataSource";
    public static final  String SLAVE_0 = "slaveOneDataSource";
    public static final  String SLAVE_1 = "slaveTwoDataSource";
    public static final  String MASTER = "masterDataSource";

    // Keys of every slave data source, consulted for simple round-robin load balancing.
    public static CopyOnWriteArrayList<String> slaveDataSources =
            new CopyOnWriteArrayList<>(new String[]{SLAVE_0, SLAVE_1});

    private String type;
    private String name;

    DataSourceType(String typeKey, String label) {
        this.type = typeKey;
        this.name = label;
    }

    /**
     *  Returns the slave data-source key stored at the given position.
     * @param number index into the slave key list
     */
    public static String getSlave(Integer number) {
        return slaveDataSources.get(number);
    }

    public String getType() {
        return this.type;
    }

    public void setType(String type) {
        this.type = type;
    }

    public String getName() {
        return this.name;
    }

    public void setName(String name) {
        this.name = name;
    }
}

DataSourceContextHolder 动态数据源持有者 标记不同数据源


/***
 * Dynamic data-source holder: keeps the active data-source key in a
 * ThreadLocal so the routing data source can read it per thread.
 */
public class DataSourceContextHolder {

    /**
     * Thread-confined storage for the current data-source key.
     */
    private static final ThreadLocal<String> ACTIVE_KEY = new ThreadLocal<>();

    ThreadLocal<String> getDataSouceThreadLocal() {
        return ACTIVE_KEY;
    }

    /**
     * Used by the AOP aspect: binds a data-source key to the current thread.
     * @param datasource key of the data source to use
     */
    public static void putDataSource(String datasource) {
        ACTIVE_KEY.set(datasource);
    }

    /**
     * Binds the generic slave key; several physical slaves may back it.
     */
    public static void read() {
        ACTIVE_KEY.set(DataSourceType.SLAVE);
    }

    /**
     * Binds the master data-source key.
     */
    public static void write() {
        ACTIVE_KEY.set(DataSourceType.MASTER);
    }

    /**
     *  Read back the key; consumed by the AbstractRoutingDataSource subclass.
     * @return the key bound to this thread, or null if none
     */
    public static String getDataSource() {
        return ACTIVE_KEY.get();
    }

    /**
     * Removes the binding so the default data source applies again.
     */
    public static void clear() {
        ACTIVE_KEY.remove();
    }

}

MultipleDataSourceRouting 多数据源 切换 根据标识获取不同源


import com.alibaba.druid.util.StringUtils;
import lombok.extern.slf4j.Slf4j;
import org.springframework.jdbc.datasource.lookup.AbstractRoutingDataSource;

import javax.sql.DataSource;
import java.util.HashMap;
import java.util.Map;


/***
 *  Multi-data-source router: selects a source from a per-thread key.
 *  Extends Spring's AbstractRoutingDataSource, which resolves the concrete
 *  DataSource for every connection request from the key returned by
 *  determineCurrentLookupKey().
 */
@Slf4j
public class MultipleDataSourceRouting extends AbstractRoutingDataSource {


    public MultipleDataSourceRouting(DataSource defaultTargetDataSource, Map<String, DataSource> targetDataSources) {
        // Fallback source, used whenever no key is bound to the thread.
        super.setDefaultTargetDataSource(defaultTargetDataSource);
        super.setTargetDataSources(new HashMap<>(targetDataSources));
        // Mandatory: copies targetDataSources into resolvedDataSources.
        super.afterPropertiesSet();
    }

    /**
     * Returns the routing key bound to the current thread (may be null).
     */
    @Override
    protected Object determineCurrentLookupKey() {
        final String lookupKey = DataSourceContextHolder.getDataSource();
        final String shown = StringUtils.isEmpty(lookupKey) ? "null 使用默认数据源" : lookupKey;
        log.info("执行多数据源 切换  当前数据源:" + shown);
        return lookupKey;
    }


}

DataSourceAspect AOP拦截特定的注解去动态的切换数据源


import lombok.extern.slf4j.Slf4j;
import org.aspectj.lang.JoinPoint;
import org.aspectj.lang.annotation.After;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Before;
import org.aspectj.lang.annotation.Pointcut;
import org.aspectj.lang.reflect.MethodSignature;
import org.springframework.context.annotation.EnableAspectJAutoProxy;
import org.springframework.core.annotation.Order;
import org.springframework.stereotype.Component;

import java.lang.reflect.Method;
import java.util.concurrent.atomic.AtomicInteger;

/***
 * AOP aspect that intercepts @TargetDataSource and switches the active data
 * source before the advised method (and its transaction) runs.
 */
@Aspect
@Component
@Order(-1) // run before @Transactional so the data source is bound first
@EnableAspectJAutoProxy(proxyTargetClass = true)
@Slf4j
public class DataSourceAspect {

    // Monotonic counter driving simple round-robin over the slaves.
    private AtomicInteger count = new AtomicInteger(0);

    // Pointcut matches @TargetDataSource on a class (@within) or on a method (@annotation).
    // The switch must happen before the transaction opens, hence before the service call.
    @Pointcut("@within(com.example.database.datasource.TargetDataSource) || @annotation(com.example.database.datasource.TargetDataSource)")
    public void dataSourcePointCut() {}

    /**
     * Reads the annotation and binds the matching data-source key to the thread.
     * @param joinPoint the intercepted call
     */
    @Before("dataSourcePointCut()")
    public void doBefore(JoinPoint joinPoint)
    {
        Method method = ((MethodSignature) joinPoint.getSignature()).getMethod();
        TargetDataSource annotationClass = method.getAnnotation(TargetDataSource.class); // annotation on the method
        if (annotationClass == null) {
            annotationClass = joinPoint.getTarget().getClass().getAnnotation(TargetDataSource.class); // fall back to the class-level annotation
            if (annotationClass == null) return;
        }
        // key configured on the annotation
        String dataSourceKey = annotationClass.dataSource();
        String dataSourceMsg = null;
        if (dataSourceKey != null && dataSourceKey.trim().equals(DataSourceType.SLAVE)){
            // generic "slave" requested: round-robin over the concrete slaves
            int number = count.getAndAdd(1);
            int dataSourceNumber = DataSourceType.slaveDataSources.size();
            // BUGFIX: once the counter overflows, number % size turns negative and
            // getSlave() would throw IndexOutOfBoundsException; floorMod keeps the
            // index in [0, size).
            int lookupKey = Math.floorMod(number, dataSourceNumber);
            String key = DataSourceType.getSlave(lookupKey);
            DataSourceContextHolder.putDataSource(key);
            dataSourceMsg = key;
        } else if (dataSourceKey != null) {
            DataSourceContextHolder.putDataSource(dataSourceKey);
            dataSourceMsg = dataSourceKey;
        } else {
            // no key configured: default to the master
            DataSourceContextHolder.write();
            dataSourceMsg = DataSourceType.MASTER;
        }
        log.info("AOP动态切换数据源,className:" + joinPoint.getTarget().getClass().getName() + "." + method.getName() + ";使用数据源:" + dataSourceMsg);
    }

    /**
     *  After the advised method finishes, clear the thread binding so the
     *  default data source applies again.
     * @param point the intercepted call
     */
    @After("dataSourcePointCut()")
    public void after(JoinPoint point) {
        DataSourceContextHolder.clear();
    }
}

service 中具体使用方法

   @TargetDataSource(dataSource = DataSourceType.SLAVE)  // route this read to a slave data source
    @Override
    public List<AttachmentUploadingRecord> findAllById(List<Long> ids) {
        // Plain delegation to the JPA repository; the AOP aspect has already
        // switched the data source before this body runs.
        return attachmentUploadingRecordRepository.findAllById(ids);
    }
 @TargetDataSource(dataSource = DataSourceType.SLAVE)
    @Override
    public int delete(Long id) throws Exception {
        try {
            attachmentUploadingRecordRepository.deleteById(id);
            AttachmentUploadingRecord record = new AttachmentUploadingRecord();
            // BUGFIX: new Random(1000) used a fixed seed, so every call produced
            // the very same "random" number; an unseeded Random varies per call.
            Random random = new Random();
            int number = random.nextInt();
            record.setAttachmentName("20180828170511301217614" + number + ".docx");
            record.setAttachmentPostfix(".docx");
            record.setAttachmentOriginalName("通知-" + number + ".docx");
            //this.insert(record); // calling via `this` bypasses the AOP proxy, so the data source is NOT switched

            // Fetch the proxied bean from the context so the nested call goes
            // through AOP and switches to the data source configured on insert().
            AttachmentUploadingRecordService serviceTemp = applicationContext.getBean(AttachmentUploadingRecordServiceImpl.class);
            serviceTemp.insert(record); // correctly switches to the data source configured on insert()

            return 1;
        } catch (Exception e) {
            // BUGFIX: log with the throwable (printStackTrace removed) and keep
            // the original exception as the cause instead of discarding it.
            log.error("删除出现异常.", e);
            throw new Exception("抛异常了", e);
        }
    }

Repository中具体使用方法

  
    @TargetDataSource(dataSource = DataSourceType.MASTER)  // force this query onto the master data source
    List<AttachmentUploadingRecord> findByAttachmentType(Byte attachmentType);

不配置TargetDataSource 注解 则使用默认的 masterDataSource数据源

上一篇下一篇

猜你喜欢

热点阅读