Debounce / Throttle (Java)
- Wherever a callback fires very frequently, we often need to filter out some of the calls; otherwise handling every single one hurts performance badly, so the frequently invoked method has to be throttled. RxJava already ships ready-made operators for exactly this, but pulling in the whole RxJava library just to use them feels far too heavyweight.
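- For reference, this is roughly what those RxJava operators look like (a minimal sketch only, assuming RxJava 3 is on the classpath; the 500 ms window and the `clicks` subject are made up for illustration):

```java
import java.util.concurrent.TimeUnit;

import io.reactivex.rxjava3.subjects.PublishSubject;

public class RxThrottleSketch {
    public static void main(String[] args) throws InterruptedException {
        PublishSubject<Integer> clicks = PublishSubject.create();

        // Case 2: only the first emission in each 500 ms window gets through.
        clicks.throttleFirst(500, TimeUnit.MILLISECONDS)
              .subscribe(i -> System.out.println("first: " + i));

        // Case 3: an emission is delivered only after 500 ms of silence (trailing edge).
        clicks.debounce(500, TimeUnit.MILLISECONDS)
              .subscribe(i -> System.out.println("last: " + i));

        for (int i = 0; i < 10; i++) {
            clicks.onNext(i);
            Thread.sleep(50);
        }
        Thread.sleep(1000); // give the debounced emission time to arrive
    }
}
```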
Analysis
- Our use cases generally fall into the following four categories:
- 1 Run the logic only when a given number of calls (e.g. clicks) arrive within a given time window
- 2 Within a given time window, run only the first callback
- 3 Within a given time window, run only the last callback
- 4 Within a given time window, run only the first and the last callback
- The first two are easy enough to implement; it is the "last callback" case that is tricky. Think about it: how would you know that a given callback is the last one within a time window? That cannot be determined up front.
- So we have to approach it from the other direction: cache each callback as it arrives (a newer callback overwrites the older one), and when the window ends, take whatever is in the cache and fire it.
- This approach has one notable drawback: nothing fires until the window has elapsed. If there is only a single call, we would ideally run the handler immediately, but before the window ends the code cannot know that it was the last call, so it has to wait until the window expires, which delays the handling logic. As long as the window is not set too large, that delay is acceptable.
- A pure Java implementation is provided below; it has also been published on Gitee (码云), feel free to use it.
```java
import java.lang.ref.WeakReference;
import java.util.HashMap;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

/** Pure-Java debounce/throttle helper; state is grouped by a groupKey (defaults to the calling thread id). */
public class ThrottleUtil {

    private static ScheduledExecutorService mScheduledExecutorService;
    private static volatile boolean mIsInit = false;
    private static HashMap<Long, Long> mCacheLastTime = new HashMap<>();          // last trigger time per group
    private static HashMap<Long, Boolean> mCacheSchedule = new HashMap<>();       // is a flush task already scheduled?
    private static HashMap<Long, CallbackParam> mCacheCallback = new HashMap<>(); // latest pending callback per group
    private static HashMap<Long, Integer> mCacheCount = new HashMap<>();          // call count inside the current window

    private static void init() {
        if (mIsInit) {
            return;
        }
        mScheduledExecutorService = new ScheduledThreadPoolExecutor(1, new ThreadPoolExecutor.DiscardPolicy());
        mIsInit = true;
    }

    /** Optionally supply your own executor before first use. */
    public static void config(ScheduledExecutorService scheduledExecutorService) {
        if (scheduledExecutorService == null) {
            return;
        }
        mScheduledExecutorService = scheduledExecutorService;
        // Mark as initialized so init() does not overwrite the supplied executor later.
        mIsInit = true;
    }
    /** Scenario 2: returns true only for the first call within each ms window (grouped by calling thread by default). */
    public static boolean throttleFirst(long ms) {
        return throttleFirst(ms, Thread.currentThread().getId());
    }

    public static synchronized boolean throttleFirst(long ms, long groupKey) {
        if (mCacheLastTime.get(groupKey) == null) {
            mCacheLastTime.put(groupKey, 0L);
        }
        long now = System.currentTimeMillis();
        if (now - mCacheLastTime.get(groupKey) > ms) {
            mCacheLastTime.put(groupKey, now);
            return true;
        }
        return false;
    }
    public static synchronized boolean throttleCount(int count, long ms) {
        return throttleCount(count, ms, Thread.currentThread().getId());
    }

    public static synchronized boolean throttleCount(int count, long ms, long groupKey) {
        if (mCacheLastTime.get(groupKey) == null) {
            mCacheLastTime.put(groupKey, 0L);
        }
        if (mCacheCount.get(groupKey) == null) {
            mCacheCount.put(groupKey, 0);
        }
        long now = System.currentTimeMillis();
        if (mCacheCount.get(groupKey) == 0) {
            mCacheLastTime.put(groupKey, now);
        }
        if (now - mCacheLastTime.get(groupKey) < ms) {
            int tmp = mCacheCount.get(groupKey);
            tmp++;
            mCacheCount.put(groupKey, tmp);
            if (tmp >= count) {
                mCacheCount.put(groupKey, 0);
                return true;
            }
        } else {
            mCacheCount.put(groupKey, 0);
        }
        return false;
    }
    public static void throttleFirst(Runnable callback, long ms) {
        throttleFirst(callback, ms, Thread.currentThread().getId());
    }

    public static void throttleFirst(Runnable callback, long ms, long groupKey) {
        if (throttleFirst(ms, groupKey)) {
            callback.run();
        }
    }

    public static <T> void throttleFirst(ICallback<T> callback, T userData, long ms, long groupKey) {
        if (throttleFirst(ms, groupKey)) {
            callback.callback(userData);
        }
    }
    /** Scenario 3: only the last call within each ms window runs (delayed until the window ends). */
    public static void throttleLast(Runnable callback, long ms) {
        throttleLast(callback, ms, Thread.currentThread().getId());
    }

    public static synchronized void throttleLast(Runnable callback, long ms, long groupKey) {
        throttleLast(userData -> callback.run(), null, ms, groupKey);
    }

    public static synchronized <T> void throttleLast(ICallback<T> callback, T userData, long ms, long groupKey) {
        if (mCacheSchedule.get(groupKey) == null) {
            mCacheSchedule.put(groupKey, false);
        }
        if (!mIsInit) {
            init();
        }
        // Cache (and overwrite) the latest callback. It is held only via a WeakReference,
        // so the caller should keep a strong reference to it until it has fired.
        mCacheCallback.put(groupKey, new CallbackParam(callback, userData));
        if (!mCacheSchedule.get(groupKey)) {
            mCacheSchedule.put(groupKey, true);
            // Once the window ends, flush whatever callback is still cached.
            mScheduledExecutorService.schedule(() -> {
                synchronized (ThrottleUtil.class) {
                    mCacheSchedule.put(groupKey, false);
                    CallbackParam cp = mCacheCallback.remove(groupKey);
                    if (cp != null && cp.iCallback != null && cp.iCallback.get() instanceof ICallback) {
                        ((ICallback) cp.iCallback.get()).callback(cp.userData);
                    }
                }
            }, ms, TimeUnit.MILLISECONDS);
        }
    }
    /** Scenario 4: the first call in each ms window runs immediately, the last one runs when the window ends. */
    public static void throttleFirstAndLast(Runnable callback, long ms) {
        throttleFirstAndLast(callback, ms, Thread.currentThread().getId());
    }

    public static synchronized void throttleFirstAndLast(Runnable callback, long ms, long groupKey) {
        throttleFirstAndLast(userData -> callback.run(), null, ms, groupKey);
    }

    public static synchronized <T> void throttleFirstAndLast(ICallback<T> callback, T userData, long ms, long groupKey) {
        if (callback == null) {
            return;
        }
        if (mCacheSchedule.get(groupKey) == null) {
            mCacheSchedule.put(groupKey, false);
        }
        if (mCacheCount.get(groupKey) == null) {
            mCacheCount.put(groupKey, 0);
        }
        if (!mIsInit) {
            init();
        }
        // Use the caller's groupKey here (not the thread id) so the "first" check shares state with this group.
        if (throttleFirst(ms, groupKey)) {
            mCacheCount.put(groupKey, 0);
            callback.callback(userData);
        } else {
            mCacheCount.put(groupKey, mCacheCount.get(groupKey) + 1);
        }
        mCacheCallback.put(groupKey, new CallbackParam(callback, userData));
        if (!mCacheSchedule.get(groupKey)) {
            mCacheSchedule.put(groupKey, true);
            mScheduledExecutorService.schedule(() -> {
                synchronized (ThrottleUtil.class) {
                    mCacheSchedule.put(groupKey, false);
                    // Only fire the trailing callback if extra calls arrived after the first one.
                    if (mCacheCount.get(groupKey) != 0) {
                        CallbackParam cp = mCacheCallback.remove(groupKey);
                        if (cp != null && cp.iCallback != null && cp.iCallback.get() instanceof ICallback) {
                            ((ICallback) cp.iCallback.get()).callback(cp.userData);
                        }
                    }
                }
            }, ms, TimeUnit.MILLISECONDS);
        }
    }
    public interface ICallback<T> {
        void callback(T userData);
    }

    public static class CallbackParam<T> {
        // The callback is held weakly to avoid leaks, so it can be collected before it fires
        // unless the caller keeps a strong reference to it.
        public WeakReference<ICallback> iCallback;
        public T userData;

        public CallbackParam(ICallback iCallback, T userData) {
            this.iCallback = new WeakReference<>(iCallback);
            this.userData = userData;
        }
    }
}
```
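- A quick usage sketch covering the four scenarios (the handler names, group keys, and time windows below are made up for illustration, and ThrottleUtil is assumed to be in the same package; note that callbacks are only held weakly, so keep a strong reference to them, e.g. in a field):

```java
public class ThrottleDemo {

    // Keep strong references: ThrottleUtil caches callbacks only via WeakReference.
    private final ThrottleUtil.ICallback<String> searchCallback =
            query -> System.out.println("search for: " + query);
    private final ThrottleUtil.ICallback<Void> refreshCallback =
            ignored -> System.out.println("refresh");

    public void onClick() {
        // Scenario 2: handle at most one click per 500 ms.
        // Use distinct group keys so the different throttles do not share state.
        if (ThrottleUtil.throttleFirst(500, 1L)) {
            System.out.println("click handled");
        }
        // Scenario 1: fire only when 5 clicks arrive within 2 seconds.
        if (ThrottleUtil.throttleCount(5, 2000, 2L)) {
            System.out.println("easter egg unlocked");
        }
    }

    public void onSearchTextChanged(String query) {
        // Scenario 3: only the last change within 300 ms is processed (delayed by up to 300 ms, as discussed above).
        ThrottleUtil.throttleLast(searchCallback, query, 300, 1001L);
    }

    public void onScroll() {
        // Scenario 4: refresh immediately on the first call, and once more for the last call in the window.
        ThrottleUtil.throttleFirstAndLast(refreshCallback, null, 300, 1002L);
    }
}
```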