Skip to content

Commit 499e3e3

Browse files
committed
update
1 parent f31d288 commit 499e3e3

File tree

17 files changed

+276
-0
lines changed

17 files changed

+276
-0
lines changed

code/my_tensorflow/Untitled.ipynb

Lines changed: 98 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,98 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "code",
5+
"execution_count": 49,
6+
"metadata": {},
7+
"outputs": [
8+
{
9+
"name": "stdout",
10+
"output_type": "stream",
11+
"text": [
12+
"1\n"
13+
]
14+
}
15+
],
16+
"source": [
17+
"import numpy as np\n",
18+
"import tensorflow as tf\n",
19+
"from src.utils import get_wb, foo\n",
20+
"\n",
21+
"from src.layers import dense\n",
22+
"\n",
23+
"foo()"
24+
]
25+
},
26+
{
27+
"cell_type": "code",
28+
"execution_count": 52,
29+
"metadata": {},
30+
"outputs": [],
31+
"source": [
32+
"tf.reset_default_graph()\n",
33+
"\n",
34+
"x = tf.constant(np.arange(16, dtype=np.float32).reshape([4,4]))\n",
35+
"\n",
36+
"o = dense(x, 16)"
37+
]
38+
},
39+
{
40+
"cell_type": "code",
41+
"execution_count": 33,
42+
"metadata": {},
43+
"outputs": [
44+
{
45+
"name": "stdout",
46+
"output_type": "stream",
47+
"text": [
48+
"(11, 12)\n",
49+
"(12,)\n"
50+
]
51+
}
52+
],
53+
"source": [
54+
"sess = tf.Session()\n",
55+
"sess.run(tf.global_variables_initializer())"
56+
]
57+
},
58+
{
59+
"cell_type": "code",
60+
"execution_count": 36,
61+
"metadata": {
62+
"collapsed": true
63+
},
64+
"outputs": [],
65+
"source": []
66+
},
67+
{
68+
"cell_type": "code",
69+
"execution_count": null,
70+
"metadata": {
71+
"collapsed": true
72+
},
73+
"outputs": [],
74+
"source": []
75+
}
76+
],
77+
"metadata": {
78+
"kernelspec": {
79+
"display_name": "Python 3",
80+
"language": "python",
81+
"name": "python3"
82+
},
83+
"language_info": {
84+
"codemirror_mode": {
85+
"name": "ipython",
86+
"version": 3
87+
},
88+
"file_extension": ".py",
89+
"mimetype": "text/x-python",
90+
"name": "python",
91+
"nbconvert_exporter": "python",
92+
"pygments_lexer": "ipython3",
93+
"version": "3.6.1"
94+
}
95+
},
96+
"nbformat": 4,
97+
"nbformat_minor": 2
98+
}

code/my_tensorflow/main.py

Whitespace-only changes.

code/my_tensorflow/src/__init__.py

Whitespace-only changes.

code/my_tensorflow/src/activations/__init__.py

Whitespace-only changes.

code/my_tensorflow/src/activations/relu.py

Whitespace-only changes.

code/my_tensorflow/src/activations/selu.py

Whitespace-only changes.

code/my_tensorflow/src/activations/softmax.py

Whitespace-only changes.
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
from .dense import *
2+
from .highway import *
Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
"""
2+
全连接层
3+
4+
References:
5+
tensorlayer.layers.DenseLayer
6+
"""
7+
8+
import tensorflow as tf
9+
10+
from ..utils import get_wb
11+
from ..activations import relu
12+
13+
14+
def dense(x, n_unit, act_fn=relu, name=None):
    """Fully-connected layer: ``act_fn(x @ W + b)``.

    Args:
        x: a `tf.Tensor` with a *static* last dimension — the code reads
            it via `x.get_shape()` (a dynamic `tf.shape(x)[-1]` tensor
            cannot be used as a variable shape).
        n_unit(int): number of output units.
        act_fn: activation applied to the affine output.
        name(str): variable-scope name; falls back to "dense" when falsy.

    Returns:
        The activated output tensor.
    """
    # Static size of the last axis; tf.shape(x)[-1] would yield a tensor,
    # which tf.get_variable cannot accept as a shape.
    in_dim = int(x.get_shape()[-1])
    with tf.variable_scope(name or "dense"):
        W, b = get_wb([in_dim, n_unit])
        out = act_fn(tf.matmul(x, W) + b)
    return out
28+

code/my_tensorflow/src/layers/highway.py

Whitespace-only changes.

code/my_tensorflow/src/regularizers/L1L2.py

Whitespace-only changes.

code/my_tensorflow/src/regularizers/__init__.py

Whitespace-only changes.
Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
"""
2+
工具函数
3+
"""
4+
import tensorflow as tf
5+
from ..regularizers import l2_regularizer
6+
7+
# Default dtype for every variable created by the helpers below.
tf_dtype = tf.float32
# Initializer aliases; passed uninstantiated to tf.get_variable.
zeros = tf.initializers.zeros
truncated_normal = tf.initializers.truncated_normal
10+
11+
12+
def foo():
    """Smoke-test helper: print the constant ``1``.

    Used from the notebook to confirm the package imports correctly.
    """
    value = 1
    print(value)
14+
15+
16+
def get_wb(shape,
           w_initializer=truncated_normal,
           b_initializer=zeros,
           w_regularizer=l2_regularizer,
           b_regularizer=l2_regularizer):
    """Create a weight matrix ``W`` and bias vector ``b`` in the current scope.

    Args:
        shape: pair ``(n_in, n_unit)`` — input and output sizes.
        w_initializer: initializer for ``W``.
        b_initializer: initializer for ``b``.
        w_regularizer: regularizer attached to ``W``.
        b_regularizer: regularizer attached to ``b``.

    Returns:
        Tuple ``(W, b)`` of variables of shapes ``[n_in, n_unit]`` and
        ``[n_unit]``.
    """
    n_in, n_unit = shape
    # Bug fix: previously the four keyword arguments were silently ignored —
    # the body hard-coded the module-level `truncated_normal` / `zeros` /
    # `l2_regularizer` instead of using the parameters. Defaults are
    # unchanged, so existing callers see identical behavior.
    W = tf.get_variable('W', shape=[n_in, n_unit],
                        dtype=tf_dtype, initializer=w_initializer, regularizer=w_regularizer)
    b = tf.get_variable('b', shape=[n_unit],
                        dtype=tf_dtype, initializer=b_initializer, regularizer=b_regularizer)
    return W, b
28+
29+
30+
def get_w(shape,
          w_initializer=truncated_normal,
          w_regularizer=l2_regularizer):
    """Create only a weight matrix ``W`` in the current variable scope.

    Args:
        shape: pair ``(n_in, n_unit)``.
        w_initializer: initializer for ``W``.
        w_regularizer: regularizer attached to ``W``.

    Returns:
        Variable ``W`` of shape ``[n_in, n_unit]``.
    """
    n_in, n_unit = shape
    weight = tf.get_variable(
        'W',
        [n_in, n_unit],
        dtype=tf_dtype,
        initializer=w_initializer,
        regularizer=w_regularizer,
    )
    return weight

code/tf_layers/dense.py

Whitespace-only changes.

code/tf_layers/highway.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
"""Highway network
2+
3+
References:
4+
fomorians/highway-fcn https://github.com/fomorians/highway-fcn/blob/master/main.py
5+
"""
6+
7+
import tensorflow as tf
8+
from keras.layers import *
9+
import keras.backend as K
10+
from keras.layers.convolutional import _Conv
11+
12+
13+
def highway_layer():
    """Highway layer — unimplemented stub.

    NOTE(review): body is empty; per the module header it is intended to
    follow fomorians/highway-fcn, but nothing is implemented yet.
    """

papers/[1505]-Highway.pdf

906 KB
Binary file not shown.

算法/备忘-必备算法.md

Lines changed: 99 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,99 @@
1+
备忘-必备算法
2+
===
3+
- 一些必备算法,主要是 C++ 版本
4+
Index
5+
---
6+
<!-- TOC -->
7+
8+
- [二分查找](#二分查找)
9+
- [离散版](#离散版)
10+
- [`my_binary_search(vector<int>, int)`](#my_binary_searchvectorint-int)
11+
- [`my_lower_bound(vector<int>, int)`](#my_lower_boundvectorint-int)
12+
- [`my_upper_bound(vector<int>, int)`](#my_upper_boundvectorint-int)
13+
- [排序](#排序)
14+
- [堆排序](#堆排序)
15+
- [建堆的时间复杂度](#建堆的时间复杂度)
16+
17+
<!-- /TOC -->
18+
19+
20+
## 二分查找
21+
22+
### 离散版
23+
24+
#### `my_binary_search(vector<int>, int)`
25+
- 没有重复元素时,目标值若存在,则返回索引;若不存在,返回 -1
26+
- 存在重复元素时,目标值若存在,则返回最小索引;若不存在,返回 -1
27+
```C++
28+
// Binary search on a sorted vector.
// Returns the smallest index holding v, or -1 when v is absent.
int my_binary_search(vector<int>& nums, int v) {
    if (nums.size() < 1) return -1;

    // (lo, hi) is kept as an open interval around the answer.
    int lo = -1, hi = nums.size();

    while (hi - lo > 1) {  // loop exits with lo + 1 == hi
        int mid = lo + (hi - lo) / 2;
        if (nums[mid] < v)
            lo = mid;
        else
            hi = mid;
    }

    // Bug fix: when v is greater than every element, lo + 1 == nums.size()
    // and the original `nums[lo + 1]` read out of bounds (undefined
    // behavior). Guard the index before dereferencing.
    if (lo + 1 >= (int)nums.size()) return -1;
    return nums[lo + 1] == v ? lo + 1 : -1;
}
43+
```
44+
45+
#### `my_lower_bound(vector<int>, int)`
46+
- 返回大于、等于目标值的最小索引(第一个大于或等于目标值的索引)
47+
```C++
48+
// Return the first index whose element is >= v (lower bound),
// or -1 for an empty vector.
int my_lower_bound(vector<int>& nums, int v) {
    if (nums.size() < 1) return -1;

    // Treat (left, right) as an open interval; it shrinks until exactly
    // one candidate position remains (left + 1 == right on exit).
    int left = -1;
    int right = nums.size();

    while (right - left > 1) {
        int mid = left + (right - left) / 2;
        if (nums[mid] < v) {
            // left stays an open bound, so no need for `left = mid + 1`.
            left = mid;
        } else {
            // mid itself may be the answer, so we must not use `mid - 1`.
            right = mid;
        }
    }

    // The answer sits just past the open left bound; identical to
    // my_binary_search except for this unconditional return.
    return left + 1;
}
63+
```
64+
- **为什么返回 `lo + 1`**
65+
- 模板开始时将 (lo, hi) 看做是一个开区间,通过不断二分,最终这个区间中只会含有一个值,即 (lo, hi]
66+
- 返回 lo+1 的含义是,结果就在 lo 的下一个;
67+
- 在迭代的过程中,hi 会从开区间变为闭区间,而 lo 始终是开区间,返回 lo+1 显得更加统一。
68+
- 当然,这跟迭代的写法是相关的,你也可以使最终的结果区间是 [lo, hi),这取决于个人习惯。
69+
70+
#### `my_upper_bound(vector<int>, int)`
71+
- 返回大于目标值的最小索引(第一个大于目标值的索引)
72+
```C++
73+
// Return the first index whose element is strictly greater than v
// (upper bound), or -1 for an empty vector.
int my_upper_bound(vector<int>& nums, int v) {
    if (nums.size() < 1) return -1;

    // Open interval (left, right); invariant: nums[left] <= v < nums[right]
    // (with the virtual sentinels nums[-1] = -inf, nums[size] = +inf).
    int left = -1;
    int right = nums.size();

    while (right - left > 1) {
        int mid = left + (right - left) / 2;
        // Sole difference from my_lower_bound: `<=` instead of `<`,
        // so equal elements fall on the left side.
        if (nums[mid] <= v)
            left = mid;
        else
            right = mid;
    }

    return left + 1;
}
89+
```
90+
91+
## 排序
92+
93+
### 堆排序
94+
```C++
95+
96+
```
97+
98+
#### 建堆的时间复杂度
99+
> [为什么建立一个二叉堆的时间为O(N)而不是O(Nlog(N))?](https://www.zhihu.com/question/264693363/answer/291397356) - 知乎

0 commit comments

Comments
 (0)